
Commit b8c03b7

rppt authored and akpm00 committed
userfaultfd: introduce mfill_get_vma() and mfill_put_vma()
Split the code that finds, locks and verifies the VMA from mfill_atomic() into a helper function. This function will be used later during the refactoring of mfill_atomic_pte_copy(). Add a counterpart mfill_put_vma() helper that unlocks the VMA and releases map_changing_lock.

[avagin@google.com: fix lock leak in mfill_get_vma()]
Link: https://lore.kernel.org/20260316173829.1126728-1-avagin@google.com
Link: https://lore.kernel.org/20260402041156.1377214-5-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Andrei Vagin <avagin@google.com>
Reviewed-by: Harry Yoo (Oracle) <harry@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand (Arm) <david@kernel.org>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nikita Kalyazin <kalyazin@amazon.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Carlier <devnexen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
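For orientation, here is a hedged sketch of the calling pattern the two helpers establish in mfill_atomic() after this change. The identifiers are taken from the diff below; the per-page copy loop and most error handling are elided, so this is illustrative rather than a verbatim excerpt of the kernel source:

	retry:
		/* Find, lock and validate the destination VMA. */
		err = mfill_get_vma(&state);
		if (err)
			goto out;

		/* hugetlb VMAs keep the locks and are handed off wholesale. */
		if (is_vm_hugetlb_page(state.vma))
			return mfill_atomic_hugetlb(ctx, state.vma, dst_start,
						    src_start, len, flags);

		/*
		 * ... per-page copy loop; the -ENOENT path may drop the VMA
		 * with mfill_put_vma(&state) and jump back to retry ...
		 */

		/* Release map_changing_lock and the VMA lock; mfill_put_vma()
		 * is a no-op once state.vma is NULL, so the unwind path stays
		 * safe even when the VMA was already dropped above. */
		mfill_put_vma(&state);
	out:
		...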
1 parent e2e0b82 commit b8c03b7

1 file changed


File tree

mm/userfaultfd.c

Lines changed: 75 additions & 50 deletions
@@ -157,6 +157,75 @@ static void uffd_mfill_unlock(struct vm_area_struct *vma)
 }
 #endif
 
+static void mfill_put_vma(struct mfill_state *state)
+{
+	if (!state->vma)
+		return;
+
+	up_read(&state->ctx->map_changing_lock);
+	uffd_mfill_unlock(state->vma);
+	state->vma = NULL;
+}
+
+static int mfill_get_vma(struct mfill_state *state)
+{
+	struct userfaultfd_ctx *ctx = state->ctx;
+	uffd_flags_t flags = state->flags;
+	struct vm_area_struct *dst_vma;
+	int err;
+
+	/*
+	 * Make sure the vma is not shared, that the dst range is
+	 * both valid and fully within a single existing vma.
+	 */
+	dst_vma = uffd_mfill_lock(ctx->mm, state->dst_start, state->len);
+	if (IS_ERR(dst_vma))
+		return PTR_ERR(dst_vma);
+
+	/*
+	 * If memory mappings are changing because of non-cooperative
+	 * operation (e.g. mremap) running in parallel, bail out and
+	 * request the user to retry later
+	 */
+	down_read(&ctx->map_changing_lock);
+	state->vma = dst_vma;
+	err = -EAGAIN;
+	if (atomic_read(&ctx->mmap_changing))
+		goto out_unlock;
+
+	err = -EINVAL;
+
+	/*
+	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
+	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
+	 */
+	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
+	    dst_vma->vm_flags & VM_SHARED))
+		goto out_unlock;
+
+	/*
+	 * validate 'mode' now that we know the dst_vma: don't allow
+	 * a wrprotect copy if the userfaultfd didn't register as WP.
+	 */
+	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
+		goto out_unlock;
+
+	if (is_vm_hugetlb_page(dst_vma))
+		return 0;
+
+	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
+		goto out_unlock;
+	if (!vma_is_shmem(dst_vma) &&
+	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
+		goto out_unlock;
+
+	return 0;
+
+out_unlock:
+	mfill_put_vma(state);
+	return err;
+}
+
 static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
 {
 	pgd_t *pgd;
@@ -767,8 +836,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 		.src_addr = src_start,
 		.dst_addr = dst_start,
 	};
-	struct mm_struct *dst_mm = ctx->mm;
-	struct vm_area_struct *dst_vma;
 	long copied = 0;
 	ssize_t err;
 
@@ -783,56 +850,17 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 	VM_WARN_ON_ONCE(dst_start + len <= dst_start);
 
 retry:
-	/*
-	 * Make sure the vma is not shared, that the dst range is
-	 * both valid and fully within a single existing vma.
-	 */
-	dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
-	if (IS_ERR(dst_vma)) {
-		err = PTR_ERR(dst_vma);
+	err = mfill_get_vma(&state);
+	if (err)
 		goto out;
-	}
-	state.vma = dst_vma;
-
-	/*
-	 * If memory mappings are changing because of non-cooperative
-	 * operation (e.g. mremap) running in parallel, bail out and
-	 * request the user to retry later
-	 */
-	down_read(&ctx->map_changing_lock);
-	err = -EAGAIN;
-	if (atomic_read(&ctx->mmap_changing))
-		goto out_unlock;
-
-	err = -EINVAL;
-	/*
-	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
-	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
-	 */
-	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
-	    dst_vma->vm_flags & VM_SHARED))
-		goto out_unlock;
-
-	/*
-	 * validate 'mode' now that we know the dst_vma: don't allow
-	 * a wrprotect copy if the userfaultfd didn't register as WP.
-	 */
-	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
-		goto out_unlock;
 
 	/*
 	 * If this is a HUGETLB vma, pass off to appropriate routine
 	 */
-	if (is_vm_hugetlb_page(dst_vma))
-		return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
+	if (is_vm_hugetlb_page(state.vma))
+		return mfill_atomic_hugetlb(ctx, state.vma, dst_start,
 					    src_start, len, flags);
 
-	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
-		goto out_unlock;
-	if (!vma_is_shmem(dst_vma) &&
-	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
-		goto out_unlock;
-
 	while (state.src_addr < src_start + len) {
 		VM_WARN_ON_ONCE(state.dst_addr >= dst_start + len);
 
@@ -851,8 +879,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 		if (unlikely(err == -ENOENT)) {
 			void *kaddr;
 
-			up_read(&ctx->map_changing_lock);
-			uffd_mfill_unlock(state.vma);
+			mfill_put_vma(&state);
 			VM_WARN_ON_ONCE(!state.folio);
 
 			kaddr = kmap_local_folio(state.folio, 0);
@@ -881,9 +908,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 		break;
 	}
 
-out_unlock:
-	up_read(&ctx->map_changing_lock);
-	uffd_mfill_unlock(state.vma);
+	mfill_put_vma(&state);
 out:
 	if (state.folio)
 		folio_put(state.folio);
