Skip to content

Commit ee1ee8a

Browse files
Matthew Wilcox (Oracle) authored and tehcaster committed
slab: Remove folio references from __ksize()
In the future, we will separate slab, folio and page from each other and calling virt_to_folio() on an address allocated from slab will return NULL. Delay the conversion from struct page to struct slab until we know we're not dealing with a large kmalloc allocation. There's a minor win for large kmalloc allocations as we avoid the compound_head() hidden in virt_to_folio(). This deprecates calling ksize() on memory allocated by alloc_pages(). Today it becomes a warning and support will be removed entirely in the future. Introduce large_kmalloc_size() to abstract how we represent the size of a large kmalloc allocation. For now, this is the same as page_size(), but it will change with separately allocated memdescs. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Link: https://patch.msgid.link/20251113000932.1589073-3-willy@infradead.org Acked-by: David Hildenbrand (Red Hat) <david@kernel.org> Reviewed-by: Harry Yoo <harry.yoo@oracle.com> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 2bcd380 commit ee1ee8a

3 files changed

Lines changed: 23 additions & 12 deletions

File tree

include/linux/page-flags.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1064,7 +1064,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
10641064
* Serialized with zone lock.
10651065
*/
10661066
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
1067-
FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
1067+
PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
10681068

10691069
/**
10701070
* PageHuge - Determine if the page belongs to hugetlbfs

mm/slab.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -605,6 +605,16 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
605605
return s->size;
606606
}
607607

608+
static inline unsigned int large_kmalloc_order(const struct page *page)
609+
{
610+
return page[1].flags.f & 0xff;
611+
}
612+
613+
static inline size_t large_kmalloc_size(const struct page *page)
614+
{
615+
return PAGE_SIZE << large_kmalloc_order(page);
616+
}
617+
608618
#ifdef CONFIG_SLUB_DEBUG
609619
void dump_unreclaimable_slab(void);
610620
#else

mm/slab_common.c

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -997,26 +997,27 @@ void __init create_kmalloc_caches(void)
997997
*/
998998
size_t __ksize(const void *object)
999999
{
1000-
struct folio *folio;
1000+
const struct page *page;
1001+
const struct slab *slab;
10011002

10021003
if (unlikely(object == ZERO_SIZE_PTR))
10031004
return 0;
10041005

1005-
folio = virt_to_folio(object);
1006+
page = virt_to_page(object);
10061007

1007-
if (unlikely(!folio_test_slab(folio))) {
1008-
if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
1009-
return 0;
1010-
if (WARN_ON(object != folio_address(folio)))
1011-
return 0;
1012-
return folio_size(folio);
1013-
}
1008+
if (unlikely(PageLargeKmalloc(page)))
1009+
return large_kmalloc_size(page);
1010+
1011+
slab = page_slab(page);
1012+
/* Delete this after we're sure there are no users */
1013+
if (WARN_ON(!slab))
1014+
return page_size(page);
10141015

10151016
#ifdef CONFIG_SLUB_DEBUG
1016-
skip_orig_size_check(folio_slab(folio)->slab_cache, object);
1017+
skip_orig_size_check(slab->slab_cache, object);
10171018
#endif
10181019

1019-
return slab_ksize(folio_slab(folio)->slab_cache);
1020+
return slab_ksize(slab->slab_cache);
10201021
}
10211022

10221023
gfp_t kmalloc_fix_flags(gfp_t flags)

0 commit comments

Comments (0)