Skip to content

Commit 2bcd380

Browse files
Matthew Wilcox (Oracle) authored and tehcaster committed
slab: Reimplement page_slab()
In order to separate slabs from folios, we need to convert from any page in a slab to the slab directly without going through a page to folio conversion first. Up to this point, page_slab() has followed the example of other memdesc converters (page_folio(), page_ptdesc() etc) and just cast the pointer to the requested type, regardless of whether the pointer is actually a pointer to the correct type or not. That changes with this commit; we check that the page actually belongs to a slab and return NULL if it does not. Other memdesc converters will adopt this convention in future. kfence was the only user of page_slab(), so adjust it to the new way of working. It will need to be touched again when we separate slab from page. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Alexander Potapenko <glider@google.com> Cc: Marco Elver <elver@google.com> Cc: kasan-dev@googlegroups.com Link: https://patch.msgid.link/20251113000932.1589073-2-willy@infradead.org Acked-by: David Hildenbrand (Red Hat) <david@kernel.org> Tested-by: Marco Elver <elver@google.com> Reviewed-by: Harry Yoo <harry.yoo@oracle.com> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent dcb6fa3 commit 2bcd380

3 files changed

Lines changed: 25 additions & 31 deletions

File tree

include/linux/page-flags.h

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1048,19 +1048,7 @@ PAGE_TYPE_OPS(Table, table, pgtable)
10481048
*/
10491049
PAGE_TYPE_OPS(Guard, guard, guard)
10501050

1051-
FOLIO_TYPE_OPS(slab, slab)
1052-
1053-
/**
1054-
* PageSlab - Determine if the page belongs to the slab allocator
1055-
* @page: The page to test.
1056-
*
1057-
* Context: Any context.
1058-
* Return: True for slab pages, false for any other kind of page.
1059-
*/
1060-
static inline bool PageSlab(const struct page *page)
1061-
{
1062-
return folio_test_slab(page_folio(page));
1063-
}
1051+
PAGE_TYPE_OPS(Slab, slab, slab)
10641052

10651053
#ifdef CONFIG_HUGETLB_PAGE
10661054
FOLIO_TYPE_OPS(hugetlb, hugetlb)

mm/kfence/core.c

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -612,14 +612,15 @@ static unsigned long kfence_init_pool(void)
612612
* enters __slab_free() slow-path.
613613
*/
614614
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
615-
struct slab *slab;
615+
struct page *page;
616616

617617
if (!i || (i % 2))
618618
continue;
619619

620-
slab = page_slab(pfn_to_page(start_pfn + i));
621-
__folio_set_slab(slab_folio(slab));
620+
page = pfn_to_page(start_pfn + i);
621+
__SetPageSlab(page);
622622
#ifdef CONFIG_MEMCG
623+
struct slab *slab = page_slab(page);
623624
slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
624625
MEMCG_DATA_OBJEXTS;
625626
#endif
@@ -665,16 +666,17 @@ static unsigned long kfence_init_pool(void)
665666

666667
reset_slab:
667668
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
668-
struct slab *slab;
669+
struct page *page;
669670

670671
if (!i || (i % 2))
671672
continue;
672673

673-
slab = page_slab(pfn_to_page(start_pfn + i));
674+
page = pfn_to_page(start_pfn + i);
674675
#ifdef CONFIG_MEMCG
676+
struct slab *slab = page_slab(page);
675677
slab->obj_exts = 0;
676678
#endif
677-
__folio_clear_slab(slab_folio(slab));
679+
__ClearPageSlab(page);
678680
}
679681

680682
return addr;

mm/slab.h

Lines changed: 16 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -146,20 +146,24 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
146146
struct slab *: (struct folio *)s))
147147

148148
/**
149-
* page_slab - Converts from first struct page to slab.
150-
* @p: The first (either head of compound or single) page of slab.
149+
* page_slab - Converts from struct page to its slab.
150+
* @page: A page which may or may not belong to a slab.
151151
*
152-
* A temporary wrapper to convert struct page to struct slab in situations where
153-
* we know the page is the compound head, or single order-0 page.
154-
*
155-
* Long-term ideally everything would work with struct slab directly or go
156-
* through folio to struct slab.
157-
*
158-
* Return: The slab which contains this page
152+
* Return: The slab which contains this page or NULL if the page does
153+
* not belong to a slab. This includes pages returned from large kmalloc.
159154
*/
160-
#define page_slab(p) (_Generic((p), \
161-
const struct page *: (const struct slab *)(p), \
162-
struct page *: (struct slab *)(p)))
155+
static inline struct slab *page_slab(const struct page *page)
156+
{
157+
unsigned long head;
158+
159+
head = READ_ONCE(page->compound_head);
160+
if (head & 1)
161+
page = (struct page *)(head - 1);
162+
if (data_race(page->page_type >> 24) != PGTY_slab)
163+
page = NULL;
164+
165+
return (struct slab *)page;
166+
}
163167

164168
/**
165169
* slab_page - The first struct page allocated for a slab

0 commit comments

Comments (0)