Commit ad9ac30

rppt authored and akpm00 committed
userfaultfd: introduce vm_uffd_ops->alloc_folio()
and use it to refactor mfill_atomic_pte_zeroed_folio() and mfill_atomic_pte_copy().

mfill_atomic_pte_zeroed_folio() and mfill_atomic_pte_copy() perform almost
identical actions:

* allocate a folio
* update folio contents (either copy from userspace or fill with zeros)
* update page tables with the new folio

Split out a __mfill_atomic_pte() helper that handles both cases and uses the
newly introduced vm_uffd_ops->alloc_folio() to allocate the folio.

Pass the ops structure from the callers to __mfill_atomic_pte() to later allow
using anon_uffd_ops for MAP_PRIVATE mappings of file-backed VMAs.

Note that the new ops method is called alloc_folio() rather than folio_alloc()
to avoid a clash with the alloc_tag macro folio_alloc().

Link: https://lore.kernel.org/20260402041156.1377214-10-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: James Houghton <jthoughton@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrei Vagin <avagin@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand (Arm) <david@kernel.org>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Harry Yoo (Oracle) <harry@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nikita Kalyazin <kalyazin@amazon.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Carlier <devnexen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
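The commit message's mention of anon_uffd_ops for MAP_PRIVATE file-backed mappings refers to follow-up work. Purely to illustrate why the ops pointer is passed in by the caller rather than looked up inside __mfill_atomic_pte(), the eventual caller-side selection could look roughly like this (a hypothetical sketch, not part of this patch; mfill_select_ops() is an invented name, while anon_uffd_ops and vma_uffd_ops() are the symbols touched by the patch):

/*
 * Hypothetical sketch only: pick the anonymous uffd ops for private
 * mappings so a MAP_PRIVATE file-backed VMA gets an anonymous folio,
 * and fall back to the VMA's own ops for shared mappings.
 */
static const struct vm_uffd_ops *mfill_select_ops(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
		return &anon_uffd_ops;

	return vma_uffd_ops(vma);
}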
1 parent dfc4d77

2 files changed: 54 additions & 44 deletions

include/linux/userfaultfd_k.h

Lines changed: 6 additions & 0 deletions
@@ -94,6 +94,12 @@ struct vm_uffd_ops {
 	 * The returned folio is locked and with reference held.
 	 */
 	struct folio *(*get_folio_noalloc)(struct inode *inode, pgoff_t pgoff);
+	/*
+	 * Called during resolution of UFFDIO_COPY request.
+	 * Should allocate and return a folio or NULL if allocation fails.
+	 */
+	struct folio *(*alloc_folio)(struct vm_area_struct *vma,
+				     unsigned long addr);
 };
 
 /* A combined operation mode + behavior flags. */
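A backend that wants to participate would supply the new callback in its vm_uffd_ops table; the sketch below simply mirrors the anonymous implementation added further down in this commit (example_alloc_folio() and example_uffd_ops are illustrative names, not part of the patch):

/* Illustrative only: a backend wiring up the new alloc_folio() hook. */
static struct folio *example_alloc_folio(struct vm_area_struct *vma,
					 unsigned long addr)
{
	struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);

	if (!folio)
		return NULL;	/* NULL tells the caller the allocation failed */

	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) {
		folio_put(folio);
		return NULL;
	}

	return folio;
}

static const struct vm_uffd_ops example_uffd_ops = {
	/* .can_userfault, .get_folio_noalloc, ... as the backend requires */
	.alloc_folio = example_alloc_folio,
};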

mm/userfaultfd.c

Lines changed: 48 additions & 44 deletions
@@ -42,8 +42,26 @@ static bool anon_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
 	return true;
 }
 
+static struct folio *anon_alloc_folio(struct vm_area_struct *vma,
+				      unsigned long addr)
+{
+	struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+					      addr);
+
+	if (!folio)
+		return NULL;
+
+	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) {
+		folio_put(folio);
+		return NULL;
+	}
+
+	return folio;
+}
+
 static const struct vm_uffd_ops anon_uffd_ops = {
 	.can_userfault = anon_can_userfault,
+	.alloc_folio = anon_alloc_folio,
 };
 
 static const struct vm_uffd_ops *vma_uffd_ops(struct vm_area_struct *vma)
@@ -456,34 +474,37 @@ static int mfill_copy_folio_retry(struct mfill_state *state, struct folio *folio
 	return 0;
 }
 
-static int mfill_atomic_pte_copy(struct mfill_state *state)
+static int __mfill_atomic_pte(struct mfill_state *state,
+			      const struct vm_uffd_ops *ops)
 {
 	unsigned long dst_addr = state->dst_addr;
 	unsigned long src_addr = state->src_addr;
 	uffd_flags_t flags = state->flags;
 	struct folio *folio;
 	int ret;
 
-	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, state->vma, dst_addr);
+	folio = ops->alloc_folio(state->vma, state->dst_addr);
 	if (!folio)
 		return -ENOMEM;
 
-	ret = -ENOMEM;
-	if (mem_cgroup_charge(folio, state->vma->vm_mm, GFP_KERNEL))
-		goto out_release;
-
-	ret = mfill_copy_folio_locked(folio, src_addr);
-	if (unlikely(ret)) {
+	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
+		ret = mfill_copy_folio_locked(folio, src_addr);
 		/*
 		 * Fallback to copy_from_user outside mmap_lock.
 		 * If retry is successful, mfill_copy_folio_locked() returns
 		 * with locks retaken by mfill_get_vma().
 		 * If there was an error, we must mfill_put_vma() anyway and it
 		 * will take care of unlocking if needed.
 		 */
-		ret = mfill_copy_folio_retry(state, folio);
-		if (ret)
-			goto out_release;
+		if (unlikely(ret)) {
+			ret = mfill_copy_folio_retry(state, folio);
+			if (ret)
+				goto err_folio_put;
+		}
+	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
+		clear_user_highpage(&folio->page, state->dst_addr);
+	} else {
+		VM_WARN_ONCE(1, "Unknown UFFDIO operation, flags: %x", flags);
 	}
 
 	/*
@@ -496,47 +517,30 @@ static int mfill_atomic_pte_copy(struct mfill_state *state)
 	ret = mfill_atomic_install_pte(state->pmd, state->vma, dst_addr,
 				       &folio->page, true, flags);
 	if (ret)
-		goto out_release;
-out:
-	return ret;
-out_release:
+		goto err_folio_put;
+
+	return 0;
+
+err_folio_put:
+	folio_put(folio);
 	/* Don't return -ENOENT so that our caller won't retry */
 	if (ret == -ENOENT)
 		ret = -EFAULT;
-	folio_put(folio);
-	goto out;
+	return ret;
 }
 
-static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
-					 struct vm_area_struct *dst_vma,
-					 unsigned long dst_addr)
+static int mfill_atomic_pte_copy(struct mfill_state *state)
 {
-	struct folio *folio;
-	int ret = -ENOMEM;
-
-	folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
-	if (!folio)
-		return ret;
-
-	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
-		goto out_put;
+	const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
 
-	/*
-	 * The memory barrier inside __folio_mark_uptodate makes sure that
-	 * zeroing out the folio become visible before mapping the page
-	 * using set_pte_at(). See do_anonymous_page().
-	 */
-	__folio_mark_uptodate(folio);
+	return __mfill_atomic_pte(state, ops);
+}
 
-	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-				       &folio->page, true, 0);
-	if (ret)
-		goto out_put;
+static int mfill_atomic_pte_zeroed_folio(struct mfill_state *state)
+{
+	const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
 
-	return 0;
-out_put:
-	folio_put(folio);
-	return ret;
+	return __mfill_atomic_pte(state, ops);
 }
 
 static int mfill_atomic_pte_zeropage(struct mfill_state *state)
@@ -549,7 +553,7 @@ static int mfill_atomic_pte_zeropage(struct mfill_state *state)
 	int ret;
 
 	if (mm_forbids_zeropage(dst_vma->vm_mm))
-		return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
+		return mfill_atomic_pte_zeroed_folio(state);
 
 	_dst_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
 					 dst_vma->vm_page_prot));
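For reference, the refactored kernel path is reached from userspace through the UFFDIO_COPY (and UFFDIO_ZEROPAGE) ioctls on a userfaultfd. Below is a minimal sketch of the copy side, assuming uffd has already been set up with UFFDIO_API/UFFDIO_REGISTER and page_size is the system page size; the helper name is illustrative:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stdio.h>

/*
 * Resolve a fault at fault_addr by copying one page from src_buf.
 * This is the request that ends up in mfill_atomic_pte_copy().
 */
static int resolve_with_copy(int uffd, void *src_buf,
			     unsigned long fault_addr, unsigned long page_size)
{
	struct uffdio_copy copy = {
		.dst = fault_addr & ~(page_size - 1),	/* page-aligned destination */
		.src = (unsigned long)src_buf,
		.len = page_size,
		.mode = 0,
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
		perror("UFFDIO_COPY");
		return -1;
	}

	return 0;
}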
