Commit f74991b

rppt authored and akpm00 committed
shmem, userfaultfd: implement shmem uffd operations using vm_uffd_ops
Add filemap_add() and filemap_remove() methods to vm_uffd_ops and use them in __mfill_atomic_pte() to add shmem folios to the page cache and to remove them on the error handling path. Implement these methods in shmem along with vm_uffd_ops->alloc_folio() and drop shmem_mfill_atomic_pte().

Since userfaultfd no longer references any functions from shmem, drop the include of linux/shmem_fs.h from mm/userfaultfd.c.

mfill_atomic_install_pte() is not used anywhere outside of mm/userfaultfd.c, so make it static.

Link: https://lore.kernel.org/20260402041156.1377214-11-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: James Houghton <jthoughton@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrei Vagin <avagin@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand (Arm) <david@kernel.org>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Harry Yoo (Oracle) <harry@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nikita Kalyazin <kalyazin@amazon.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Carlier <devnexen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
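For context, the kernel path this commit reworks services the UFFDIO_COPY ioctl. Below is a minimal illustrative sketch of the userspace side, using only the documented userfaultfd(2) API; it is not part of this commit, error handling is elided, a 4KiB page size is assumed, and depending on the vm.unprivileged_userfaultfd sysctl it may require privileges.

/* uffd_copy_demo.c - resolve a missing page in a shmem-backed mapping. */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        long uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        struct uffdio_api api = { .api = UFFD_API };

        ioctl(uffd, UFFDIO_API, &api);

        /* MAP_SHARED | MAP_ANONYMOUS memory is shmem-backed, so faults on
         * this area take the shmem path reworked by this commit. */
        size_t len = 4096;
        char *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_ANONYMOUS, -1, 0);

        struct uffdio_register reg = {
                .range = { .start = (unsigned long)area, .len = len },
                .mode = UFFDIO_REGISTER_MODE_MISSING,
        };

        ioctl(uffd, UFFDIO_REGISTER, &reg);

        /* Fill the missing page: the kernel allocates a folio, copies
         * 'src' into it, adds it to the shmem page cache (the new
         * ->filemap_add() step) and installs the PTE. */
        static char src[4096];
        memset(src, 0xab, sizeof(src));

        struct uffdio_copy copy = {
                .dst = (unsigned long)area,
                .src = (unsigned long)src,
                .len = len,
        };

        ioctl(uffd, UFFDIO_COPY, &copy);

        return 0;
}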
1 parent ad9ac30 commit f74991b

4 files changed

Lines changed: 106 additions & 155 deletions

File tree

include/linux/shmem_fs.h
include/linux/userfaultfd_k.h
mm/shmem.c
mm/userfaultfd.c

include/linux/shmem_fs.h

Lines changed: 0 additions & 14 deletions
@@ -221,20 +221,6 @@ static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
 
 extern bool shmem_charge(struct inode *inode, long pages);
 
-#ifdef CONFIG_USERFAULTFD
-#ifdef CONFIG_SHMEM
-extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
-                                  struct vm_area_struct *dst_vma,
-                                  unsigned long dst_addr,
-                                  unsigned long src_addr,
-                                  uffd_flags_t flags,
-                                  struct folio **foliop);
-#else /* !CONFIG_SHMEM */
-#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
-                               src_addr, flags, foliop) ({ BUG(); 0; })
-#endif /* CONFIG_SHMEM */
-#endif /* CONFIG_USERFAULTFD */
-
 /*
  * Used space is stored as unsigned 64-bit value in bytes but
  * quota core supports only signed 64-bit values so use that

include/linux/userfaultfd_k.h

Lines changed: 14 additions & 5 deletions
@@ -100,6 +100,20 @@ struct vm_uffd_ops {
         */
        struct folio *(*alloc_folio)(struct vm_area_struct *vma,
                                     unsigned long addr);
+       /*
+        * Called during resolution of UFFDIO_COPY request.
+        * Should only be called with a folio returned by alloc_folio() above.
+        * The folio will be set to locked.
+        * Returns 0 on success, error code on failure.
+        */
+       int (*filemap_add)(struct folio *folio, struct vm_area_struct *vma,
+                          unsigned long addr);
+       /*
+        * Called during resolution of UFFDIO_COPY request on the error
+        * handling path.
+        * Should revert the operation of ->filemap_add().
+        */
+       void (*filemap_remove)(struct folio *folio, struct vm_area_struct *vma);
 };
 
 /* A combined operation mode + behavior flags. */
@@ -133,11 +147,6 @@ static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_at
 /* Flags controlling behavior. These behavior changes are mode-independent. */
 #define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0)
 
-extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
-                                    struct vm_area_struct *dst_vma,
-                                    unsigned long dst_addr, struct page *page,
-                                    bool newly_allocated, uffd_flags_t flags);
-
 extern ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
                                  unsigned long src_start, unsigned long len,
                                  uffd_flags_t flags);
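To make the contract of the two new hooks concrete, here is a hypothetical minimal consumer. This sketch is not part of the commit and the myfs_* names are invented; it leans on filemap_add_folio() to do the memcg charge, locking and LRU insertion, which is one possible arrangement (shmem's real implementation, in the mm/shmem.c hunk below, open-codes these steps). The core calls alloc_folio(), copies the user data, marks the folio up to date, then filemap_add(); filemap_remove() runs only if PTE installation fails afterwards.

/* Hypothetical example - not part of this commit. */
static struct folio *myfs_uffd_alloc_folio(struct vm_area_struct *vma,
                                           unsigned long addr)
{
        /* Hand back an order-0 folio; the core copies into it and marks
         * it up to date before calling ->filemap_add(). */
        return folio_alloc(mapping_gfp_mask(vma->vm_file->f_mapping), 0);
}

static int myfs_uffd_filemap_add(struct folio *folio,
                                 struct vm_area_struct *vma,
                                 unsigned long addr)
{
        /* Charges, locks and publishes the folio in the page cache; on
         * success it stays locked, as the ops contract above requires. */
        return filemap_add_folio(vma->vm_file->f_mapping, folio,
                                 linear_page_index(vma, addr), GFP_KERNEL);
}

static void myfs_uffd_filemap_remove(struct folio *folio,
                                     struct vm_area_struct *vma)
{
        /* Revert ->filemap_add(); the core drops the last reference. */
        filemap_remove_folio(folio);
        folio_unlock(folio);
}

static const struct vm_uffd_ops myfs_uffd_ops = {
        /* .can_userfault and .get_folio_noalloc elided for brevity */
        .alloc_folio    = myfs_uffd_alloc_folio,
        .filemap_add    = myfs_uffd_filemap_add,
        .filemap_remove = myfs_uffd_filemap_remove,
};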

mm/shmem.c

Lines changed: 53 additions & 95 deletions
@@ -3175,118 +3175,73 @@ static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
 #endif /* CONFIG_TMPFS_QUOTA */
 
 #ifdef CONFIG_USERFAULTFD
-int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
-                           struct vm_area_struct *dst_vma,
-                           unsigned long dst_addr,
-                           unsigned long src_addr,
-                           uffd_flags_t flags,
-                           struct folio **foliop)
-{
-       struct inode *inode = file_inode(dst_vma->vm_file);
-       struct shmem_inode_info *info = SHMEM_I(inode);
+static struct folio *shmem_mfill_folio_alloc(struct vm_area_struct *vma,
+                                             unsigned long addr)
+{
+       struct inode *inode = file_inode(vma->vm_file);
        struct address_space *mapping = inode->i_mapping;
+       struct shmem_inode_info *info = SHMEM_I(inode);
+       pgoff_t pgoff = linear_page_index(vma, addr);
        gfp_t gfp = mapping_gfp_mask(mapping);
-       pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
-       void *page_kaddr;
        struct folio *folio;
-       int ret;
-       pgoff_t max_off;
-
-       if (shmem_inode_acct_blocks(inode, 1)) {
-               /*
-                * We may have got a page, returned -ENOENT triggering a retry,
-                * and now we find ourselves with -ENOMEM. Release the page, to
-                * avoid a BUG_ON in our caller.
-                */
-               if (unlikely(*foliop)) {
-                       folio_put(*foliop);
-                       *foliop = NULL;
-               }
-               return -ENOMEM;
-       }
 
-       if (!*foliop) {
-               ret = -ENOMEM;
-               folio = shmem_alloc_folio(gfp, 0, info, pgoff);
-               if (!folio)
-                       goto out_unacct_blocks;
+       if (unlikely(pgoff >= DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE)))
+               return NULL;
 
-               if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
-                       page_kaddr = kmap_local_folio(folio, 0);
-                       /*
-                        * The read mmap_lock is held here. Despite the
-                        * mmap_lock being read recursive a deadlock is still
-                        * possible if a writer has taken a lock. For example:
-                        *
-                        * process A thread 1 takes read lock on own mmap_lock
-                        * process A thread 2 calls mmap, blocks taking write lock
-                        * process B thread 1 takes page fault, read lock on own mmap lock
-                        * process B thread 2 calls mmap, blocks taking write lock
-                        * process A thread 1 blocks taking read lock on process B
-                        * process B thread 1 blocks taking read lock on process A
-                        *
-                        * Disable page faults to prevent potential deadlock
-                        * and retry the copy outside the mmap_lock.
-                        */
-                       pagefault_disable();
-                       ret = copy_from_user(page_kaddr,
-                                            (const void __user *)src_addr,
-                                            PAGE_SIZE);
-                       pagefault_enable();
-                       kunmap_local(page_kaddr);
-
-                       /* fallback to copy_from_user outside mmap_lock */
-                       if (unlikely(ret)) {
-                               *foliop = folio;
-                               ret = -ENOENT;
-                               /* don't free the page */
-                               goto out_unacct_blocks;
-                       }
+       folio = shmem_alloc_folio(gfp, 0, info, pgoff);
+       if (!folio)
+               return NULL;
 
-                       flush_dcache_folio(folio);
-               } else {                /* ZEROPAGE */
-                       clear_user_highpage(&folio->page, dst_addr);
-               }
-       } else {
-               folio = *foliop;
-               VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-               *foliop = NULL;
+       if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) {
+               folio_put(folio);
+               return NULL;
        }
 
-       VM_BUG_ON(folio_test_locked(folio));
-       VM_BUG_ON(folio_test_swapbacked(folio));
+       return folio;
+}
+
+static int shmem_mfill_filemap_add(struct folio *folio,
+                                  struct vm_area_struct *vma,
+                                  unsigned long addr)
+{
+       struct inode *inode = file_inode(vma->vm_file);
+       struct address_space *mapping = inode->i_mapping;
+       pgoff_t pgoff = linear_page_index(vma, addr);
+       gfp_t gfp = mapping_gfp_mask(mapping);
+       int err;
+
        __folio_set_locked(folio);
        __folio_set_swapbacked(folio);
-       __folio_mark_uptodate(folio);
-
-       ret = -EFAULT;
-       max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
-       if (unlikely(pgoff >= max_off))
-               goto out_release;
 
-       ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
-       if (ret)
-               goto out_release;
-       ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
-       if (ret)
-               goto out_release;
+       err = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
+       if (err)
+               goto err_unlock;
 
-       ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-                                      &folio->page, true, flags);
-       if (ret)
-               goto out_delete_from_cache;
+       if (shmem_inode_acct_blocks(inode, 1)) {
+               err = -ENOMEM;
+               goto err_delete_from_cache;
+       }
 
+       folio_add_lru(folio);
        shmem_recalc_inode(inode, 1, 0);
-       folio_unlock(folio);
+
        return 0;
-out_delete_from_cache:
+
+err_delete_from_cache:
        filemap_remove_folio(folio);
-out_release:
+err_unlock:
+       folio_unlock(folio);
+       return err;
+}
+
+static void shmem_mfill_filemap_remove(struct folio *folio,
+                                      struct vm_area_struct *vma)
+{
+       struct inode *inode = file_inode(vma->vm_file);
+
+       filemap_remove_folio(folio);
+       shmem_recalc_inode(inode, 0, 0);
        folio_unlock(folio);
-       folio_put(folio);
-out_unacct_blocks:
-       shmem_inode_unacct_blocks(inode, 1);
-       return ret;
 }
 
 static struct folio *shmem_get_folio_noalloc(struct inode *inode, pgoff_t pgoff)
@@ -3309,6 +3264,9 @@ static bool shmem_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
 static const struct vm_uffd_ops shmem_uffd_ops = {
        .can_userfault = shmem_can_userfault,
        .get_folio_noalloc = shmem_get_folio_noalloc,
+       .alloc_folio = shmem_mfill_folio_alloc,
+       .filemap_add = shmem_mfill_filemap_add,
+       .filemap_remove = shmem_mfill_filemap_remove,
 };
 #endif /* CONFIG_USERFAULTFD */

mm/userfaultfd.c

Lines changed: 39 additions & 41 deletions
@@ -14,7 +14,6 @@
 #include <linux/userfaultfd_k.h>
 #include <linux/mmu_notifier.h>
 #include <linux/hugetlb.h>
-#include <linux/shmem_fs.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 #include "internal.h"
@@ -338,10 +337,10 @@ static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
  * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
  * and anon, and for both shared and private VMAs.
  */
-int mfill_atomic_install_pte(pmd_t *dst_pmd,
-                             struct vm_area_struct *dst_vma,
-                             unsigned long dst_addr, struct page *page,
-                             bool newly_allocated, uffd_flags_t flags)
+static int mfill_atomic_install_pte(pmd_t *dst_pmd,
+                                    struct vm_area_struct *dst_vma,
+                                    unsigned long dst_addr, struct page *page,
+                                    uffd_flags_t flags)
 {
        int ret;
        struct mm_struct *dst_mm = dst_vma->vm_mm;
@@ -385,9 +384,6 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
                goto out_unlock;
 
        if (page_in_cache) {
-               /* Usually, cache pages are already added to LRU */
-               if (newly_allocated)
-                       folio_add_lru(folio);
                folio_add_file_rmap_pte(folio, page, dst_vma);
        } else {
                folio_add_new_anon_rmap(folio, dst_vma, dst_addr, RMAP_EXCLUSIVE);
@@ -402,6 +398,9 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
 
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 
+       if (page_in_cache)
+               folio_unlock(folio);
+
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
        ret = 0;
@@ -514,13 +513,22 @@ static int __mfill_atomic_pte(struct mfill_state *state,
         */
        __folio_mark_uptodate(folio);
 
+       if (ops->filemap_add) {
+               ret = ops->filemap_add(folio, state->vma, state->dst_addr);
+               if (ret)
+                       goto err_folio_put;
+       }
+
        ret = mfill_atomic_install_pte(state->pmd, state->vma, dst_addr,
-                                      &folio->page, true, flags);
+                                      &folio->page, flags);
        if (ret)
-               goto err_folio_put;
+               goto err_filemap_remove;
 
        return 0;
 
+err_filemap_remove:
+       if (ops->filemap_remove)
+               ops->filemap_remove(folio, state->vma);
 err_folio_put:
        folio_put(folio);
        /* Don't return -ENOENT so that our caller won't retry */
@@ -533,6 +541,18 @@ static int mfill_atomic_pte_copy(struct mfill_state *state)
 {
        const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
 
+       /*
+        * The normal page fault path for a MAP_PRIVATE mapping in a
+        * file-backed VMA will invoke the fault, fill the hole in the file and
+        * COW it right away. The result generates plain anonymous memory.
+        * So when we are asked to fill a hole in a MAP_PRIVATE mapping, we'll
+        * generate anonymous memory directly without actually filling the
+        * hole. For the MAP_PRIVATE case the robustness check only happens in
+        * the pagetable (to verify it's still none) and not in the page cache.
+        */
+       if (!(state->vma->vm_flags & VM_SHARED))
+               ops = &anon_uffd_ops;
+
        return __mfill_atomic_pte(state, ops);
 }
 
@@ -552,7 +572,8 @@ static int mfill_atomic_pte_zeropage(struct mfill_state *state)
        spinlock_t *ptl;
        int ret;
 
-       if (mm_forbids_zeropage(dst_vma->vm_mm))
+       if (mm_forbids_zeropage(dst_vma->vm_mm) ||
+           (dst_vma->vm_flags & VM_SHARED))
                return mfill_atomic_pte_zeroed_folio(state);
 
        _dst_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
@@ -609,11 +630,10 @@ static int mfill_atomic_pte_continue(struct mfill_state *state)
        }
 
        ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-                                      page, false, flags);
+                                      page, flags);
        if (ret)
                goto out_release;
 
-       folio_unlock(folio);
        return 0;
 
 out_release:
@@ -836,41 +856,19 @@ extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
 
 static __always_inline ssize_t mfill_atomic_pte(struct mfill_state *state)
 {
-       struct vm_area_struct *dst_vma = state->vma;
-       unsigned long src_addr = state->src_addr;
-       unsigned long dst_addr = state->dst_addr;
-       struct folio **foliop = &state->folio;
        uffd_flags_t flags = state->flags;
-       pmd_t *dst_pmd = state->pmd;
-       ssize_t err;
 
        if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
                return mfill_atomic_pte_continue(state);
        if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON))
                return mfill_atomic_pte_poison(state);
+       if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
+               return mfill_atomic_pte_copy(state);
+       if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE))
+               return mfill_atomic_pte_zeropage(state);
 
-       /*
-        * The normal page fault path for a shmem will invoke the
-        * fault, fill the hole in the file and COW it right away. The
-        * result generates plain anonymous memory. So when we are
-        * asked to fill an hole in a MAP_PRIVATE shmem mapping, we'll
-        * generate anonymous memory directly without actually filling
-        * the hole. For the MAP_PRIVATE case the robustness check
-        * only happens in the pagetable (to verify it's still none)
-        * and not in the radix tree.
-        */
-       if (!(dst_vma->vm_flags & VM_SHARED)) {
-               if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
-                       err = mfill_atomic_pte_copy(state);
-               else
-                       err = mfill_atomic_pte_zeropage(state);
-       } else {
-               err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
-                                            dst_addr, src_addr,
-                                            flags, foliop);
-       }
-
-       return err;
+       VM_WARN_ONCE(1, "Unknown UFFDIO operation, flags: %x", flags);
+       return -EOPNOTSUPP;
 }
 
 static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
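One behavioral note on the zeropage hunk above: with VM_SHARED now checked alongside mm_forbids_zeropage(), UFFDIO_ZEROPAGE on a shared (e.g. shmem-backed) range is serviced by allocating a zero-filled folio rather than mapping the shared zero page, which is what shmem_mfill_atomic_pte() previously did internally via clear_user_highpage(). The userspace call is unchanged; an illustrative sketch (standard userfaultfd(2) API, not part of this commit):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Resolve a missing-page fault at [addr, addr + len) with zeroes. On a
 * VM_SHARED registered range this now takes the zeroed-folio path. */
static int resolve_with_zeroes(int uffd, unsigned long addr, size_t len)
{
        struct uffdio_zeropage zp = {
                .range = { .start = addr, .len = len },
        };

        return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
}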
