[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH 3/3 gnumach] kern/gsync: Use vm_map_lookup with exit_map_locked as appropriate
From: |
Samuel Thibault |
Subject: |
Re: [PATCH 3/3 gnumach] kern/gsync: Use vm_map_lookup with exit_map_locked as appropriate |
Date: |
Wed, 21 Feb 2024 16:46:00 +0100 |
User-agent: |
NeoMutt/20170609 (1.8.3) |
Damien Zammit, le mer. 21 févr. 2024 13:46:10 +0000, a écrit:
> This refactors gsync functions so that the read lock on vm map
> is only taken once and extended throughout appropriate calls.
Please also explain the "why" here: describe the deadlock scenario that was
found to occur otherwise.
> Co-Authored-By: Sergey Bugaev <bugaevc@gmail.com>
> ---
> kern/gsync.c | 28 +++++++++++++++-------------
> 1 file changed, 15 insertions(+), 13 deletions(-)
>
> diff --git a/kern/gsync.c b/kern/gsync.c
> index 19190349..083f90b9 100644
> --- a/kern/gsync.c
> +++ b/kern/gsync.c
> @@ -126,7 +126,7 @@ gsync_key_hash (const union gsync_key *keyp)
> * in VAP. Returns 0 if successful, -1 otherwise. */
> static int
> probe_address (vm_map_t map, vm_offset_t addr,
> - int flags, struct vm_args *vap)
> + int flags, boolean_t exit_map_locked, struct vm_args *vap)
> {
> vm_prot_t prot = VM_PROT_READ |
> ((flags & GSYNC_MUTATE) ? VM_PROT_WRITE : 0);
> @@ -134,11 +134,17 @@ probe_address (vm_map_t map, vm_offset_t addr,
> vm_prot_t rprot;
> boolean_t wired_p;
>
> - if (vm_map_lookup (&map, addr, prot, FALSE, &ver,
> + if (vm_map_lookup (&map, addr, prot, exit_map_locked, &ver,
> &vap->obj, &vap->off, &rprot, &wired_p) != KERN_SUCCESS)
> - return (-1);
> + {
> + if (exit_map_locked)
> + vm_map_unlock_read (map);
In the case of a failure, vm_map_lookup should rather not keep the map
locked: in most cases the caller cannot continue anyway.
> + return (-1);
> + }
> else if ((rprot & prot) != prot)
> {
> + if (exit_map_locked)
> + vm_map_unlock_read (map);
> vm_object_unlock (vap->obj);
> return (-1);
> }
> @@ -151,9 +157,9 @@ probe_address (vm_map_t map, vm_offset_t addr,
> * the argument VAP for future use. */
> static int
> gsync_prepare_key (task_t task, vm_offset_t addr, int flags,
> - union gsync_key *keyp, struct vm_args *vap)
> + boolean_t exit_map_locked, union gsync_key *keyp, struct vm_args *vap)
> {
> - if (probe_address (task->map, addr, flags, vap) < 0)
> + if (probe_address (task->map, addr, flags, exit_map_locked, vap) < 0)
> return (-1);
> else if (flags & GSYNC_SHARED)
> {
> @@ -227,12 +233,10 @@ kern_return_t gsync_wait (task_t task, vm_offset_t addr,
> else if (addr % sizeof (int) != 0)
> return (KERN_INVALID_ADDRESS);
>
> - vm_map_lock_read (task->map);
> -
> struct gsync_waiter w;
> struct vm_args va;
> boolean_t remote = task != current_task ();
> - int bucket = gsync_prepare_key (task, addr, flags, &w.key, &va);
> + int bucket = gsync_prepare_key (task, addr, flags, TRUE, &w.key, &va);
>
> if (bucket < 0)
> {
> @@ -354,11 +358,9 @@ kern_return_t gsync_wake (task_t task,
> else if (addr % sizeof (int) != 0)
> return (KERN_INVALID_ADDRESS);
>
> - vm_map_lock_read (task->map);
> -
> union gsync_key key;
> struct vm_args va;
> - int bucket = gsync_prepare_key (task, addr, flags, &key, &va);
> + int bucket = gsync_prepare_key (task, addr, flags, TRUE, &key, &va);
>
> if (bucket < 0)
> {
> @@ -434,14 +436,14 @@ kern_return_t gsync_requeue (task_t task, vm_offset_t
> src,
> union gsync_key src_k, dst_k;
> struct vm_args va;
>
> - int src_bkt = gsync_prepare_key (task, src, flags, &src_k, &va);
> + int src_bkt = gsync_prepare_key (task, src, flags, FALSE, &src_k, &va);
> if (src_bkt < 0)
> return (KERN_INVALID_ADDRESS);
>
> /* Unlock the VM object before the second lookup. */
> vm_object_unlock (va.obj);
>
> - int dst_bkt = gsync_prepare_key (task, dst, flags, &dst_k, &va);
> + int dst_bkt = gsync_prepare_key (task, dst, flags, FALSE, &dst_k, &va);
> if (dst_bkt < 0)
> return (KERN_INVALID_ADDRESS);
I don't think it's worth adding the exit_map_locked parameter to
gsync_prepare_key: for gsync_requeue we can as well just release the
read lock, that'll keep the gsync_prepare_key code simpler.
Samuel