Skip to content

Commit 8b7b853

Browse files
committed
memblock: move reserve_bootmem_region() to memblock.c and make it static
reserve_bootmem_region() is only called from memmap_init_reserved_pages() and it was in mm/mm_init.c because of its dependencies on the static init_deferred_page(). Since init_deferred_page() is not static anymore, move reserve_bootmem_region(), rename it to memmap_init_reserved_range() and make it static. Update the comment describing it to better reflect what the function does and drop the bogus comment about reserved pages in free_bootmem_page(). Update memblock test stubs to reflect the core changes. Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Reviewed-by: David Hildenbrand (Arm) <david@kernel.org> Link: https://patch.msgid.link/20260323072042.3651061-1-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
1 parent 0709682 commit 8b7b853

7 files changed

Lines changed: 37 additions & 41 deletions

File tree

include/linux/bootmem_info.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -44,10 +44,6 @@ static inline void free_bootmem_page(struct page *page)
4444
{
4545
enum bootmem_type type = bootmem_type(page);
4646

47-
/*
48-
* The reserve_bootmem_region sets the reserved flag on bootmem
49-
* pages.
50-
*/
5147
VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
5248

5349
if (type == SECTION_INFO || type == MIX_SECTION_INFO)

include/linux/mm.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3686,9 +3686,6 @@ extern unsigned long free_reserved_area(void *start, void *end,
36863686

36873687
extern void adjust_managed_page_count(struct page *page, long count);
36883688

3689-
extern void reserve_bootmem_region(phys_addr_t start,
3690-
phys_addr_t end, int nid);
3691-
36923689
/* Free the reserved page into the buddy system, so it gets managed. */
36933690
void free_reserved_page(struct page *page);
36943691

mm/memblock.c

Lines changed: 28 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -974,7 +974,7 @@ __init void memmap_init_kho_scratch_pages(void)
974974
/*
975975
* Initialize struct pages for free scratch memory.
976976
* The struct pages for reserved scratch memory will be set up in
977-
* reserve_bootmem_region()
977+
* memmap_init_reserved_pages()
978978
*/
979979
__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
980980
MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
@@ -2241,6 +2241,31 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
22412241
return end_pfn - start_pfn;
22422242
}
22432243

2244+
/*
2245+
* Initialised pages do not have PageReserved set. This function is called
2246+
* for each reserved range and marks the pages PageReserved.
2247+
* When deferred initialization of struct pages is enabled it also ensures
2248+
* that struct pages are properly initialised.
2249+
*/
2250+
static void __init memmap_init_reserved_range(phys_addr_t start,
2251+
phys_addr_t end, int nid)
2252+
{
2253+
unsigned long pfn;
2254+
2255+
for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
2256+
struct page *page = pfn_to_page(pfn);
2257+
2258+
init_deferred_page(pfn, nid);
2259+
2260+
/*
2261+
* no need for atomic set_bit because the struct
2262+
* page is not visible yet so nobody should
2263+
* access it yet.
2264+
*/
2265+
__SetPageReserved(page);
2266+
}
2267+
}
2268+
22442269
static void __init memmap_init_reserved_pages(void)
22452270
{
22462271
struct memblock_region *region;
@@ -2260,7 +2285,7 @@ static void __init memmap_init_reserved_pages(void)
22602285
end = start + region->size;
22612286

22622287
if (memblock_is_nomap(region))
2263-
reserve_bootmem_region(start, end, nid);
2288+
memmap_init_reserved_range(start, end, nid);
22642289

22652290
memblock_set_node(start, region->size, &memblock.reserved, nid);
22662291
}
@@ -2285,7 +2310,7 @@ static void __init memmap_init_reserved_pages(void)
22852310
if (!numa_valid_node(nid))
22862311
nid = early_pfn_to_nid(PFN_DOWN(start));
22872312

2288-
reserve_bootmem_region(start, end, nid);
2313+
memmap_init_reserved_range(start, end, nid);
22892314
}
22902315
}
22912316
}

mm/mm_init.c

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -772,31 +772,6 @@ void __meminit init_deferred_page(unsigned long pfn, int nid)
772772
__init_deferred_page(pfn, nid);
773773
}
774774

775-
/*
776-
* Initialised pages do not have PageReserved set. This function is
777-
* called for each range allocated by the bootmem allocator and
778-
* marks the pages PageReserved. The remaining valid pages are later
779-
* sent to the buddy page allocator.
780-
*/
781-
void __meminit reserve_bootmem_region(phys_addr_t start,
782-
phys_addr_t end, int nid)
783-
{
784-
unsigned long pfn;
785-
786-
for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
787-
struct page *page = pfn_to_page(pfn);
788-
789-
__init_deferred_page(pfn, nid);
790-
791-
/*
792-
* no need for atomic set_bit because the struct
793-
* page is not visible yet so nobody should
794-
* access it yet.
795-
*/
796-
__SetPageReserved(page);
797-
}
798-
}
799-
800775
/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
801776
static bool __meminit
802777
overlap_memmap_init(unsigned long zone, unsigned long *pfn)

tools/include/linux/mm.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,6 @@ static inline phys_addr_t virt_to_phys(volatile void *address)
3232
return (phys_addr_t)address;
3333
}
3434

35-
void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
36-
3735
static inline void totalram_pages_inc(void)
3836
{
3937
}

tools/testing/memblock/internal.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,4 +29,13 @@ static inline unsigned long free_reserved_area(void *start, void *end,
2929
return 0;
3030
}
3131

32+
#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
33+
for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
34+
35+
static inline void init_deferred_page(unsigned long pfn, int nid)
36+
{
37+
}
38+
39+
#define __SetPageReserved(p) ((void)(p))
40+
3241
#endif

tools/testing/memblock/mmzone.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,10 +11,6 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
1111
return NULL;
1212
}
1313

14-
void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
15-
{
16-
}
17-
1814
void atomic_long_set(atomic_long_t *v, long i)
1915
{
2016
}

0 commit comments

Comments
 (0)