Skip to content

Commit 0510bda

Browse files
committed
mm: move free_reserved_area() to mm/memblock.c
free_reserved_area() is related to memblock as it frees reserved memory back to the buddy allocator, similar to what memblock_free_late() does. Move free_reserved_area() to mm/memblock.c to prepare for further consolidation of the functions that free reserved memory. No functional changes. Link: https://patch.msgid.link/20260323074836.3653702-5-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
1 parent 8ff5d8f commit 0510bda

4 files changed

Lines changed: 68 additions & 40 deletions

File tree

mm/memblock.c

Lines changed: 36 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -894,6 +894,42 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
894894
return memblock_remove_range(&memblock.memory, base, size);
895895
}
896896

897+
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
898+
{
899+
void *pos;
900+
unsigned long pages = 0;
901+
902+
start = (void *)PAGE_ALIGN((unsigned long)start);
903+
end = (void *)((unsigned long)end & PAGE_MASK);
904+
for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
905+
struct page *page = virt_to_page(pos);
906+
void *direct_map_addr;
907+
908+
/*
909+
* 'direct_map_addr' might be different from 'pos'
910+
* because some architectures' virt_to_page()
911+
* work with aliases. Getting the direct map
912+
* address ensures that we get a _writeable_
913+
* alias for the memset().
914+
*/
915+
direct_map_addr = page_address(page);
916+
/*
917+
* Perform a kasan-unchecked memset() since this memory
918+
* has not been initialized.
919+
*/
920+
direct_map_addr = kasan_reset_tag(direct_map_addr);
921+
if ((unsigned int)poison <= 0xFF)
922+
memset(direct_map_addr, poison, PAGE_SIZE);
923+
924+
free_reserved_page(page);
925+
}
926+
927+
if (pages && s)
928+
pr_info("Freeing %s memory: %ldK\n", s, K(pages));
929+
930+
return pages;
931+
}
932+
897933
/**
898934
* memblock_free - free boot memory allocation
899935
* @ptr: starting address of the boot memory allocation
@@ -1777,7 +1813,6 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
17771813
totalram_pages_inc();
17781814
}
17791815
}
1780-
17811816
/*
17821817
* Remaining API functions
17831818
*/

mm/page_alloc.c

Lines changed: 0 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -6234,42 +6234,6 @@ void adjust_managed_page_count(struct page *page, long count)
62346234
}
62356235
EXPORT_SYMBOL(adjust_managed_page_count);
62366236

6237-
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
6238-
{
6239-
void *pos;
6240-
unsigned long pages = 0;
6241-
6242-
start = (void *)PAGE_ALIGN((unsigned long)start);
6243-
end = (void *)((unsigned long)end & PAGE_MASK);
6244-
for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
6245-
struct page *page = virt_to_page(pos);
6246-
void *direct_map_addr;
6247-
6248-
/*
6249-
* 'direct_map_addr' might be different from 'pos'
6250-
* because some architectures' virt_to_page()
6251-
* work with aliases. Getting the direct map
6252-
* address ensures that we get a _writeable_
6253-
* alias for the memset().
6254-
*/
6255-
direct_map_addr = page_address(page);
6256-
/*
6257-
* Perform a kasan-unchecked memset() since this memory
6258-
* has not been initialized.
6259-
*/
6260-
direct_map_addr = kasan_reset_tag(direct_map_addr);
6261-
if ((unsigned int)poison <= 0xFF)
6262-
memset(direct_map_addr, poison, PAGE_SIZE);
6263-
6264-
free_reserved_page(page);
6265-
}
6266-
6267-
if (pages && s)
6268-
pr_info("Freeing %s memory: %ldK\n", s, K(pages));
6269-
6270-
return pages;
6271-
}
6272-
62736237
void free_reserved_page(struct page *page)
62746238
{
62756239
clear_page_tag_ref(page);

tools/include/linux/mm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717

1818
#define __va(x) ((void *)((unsigned long)(x)))
1919
#define __pa(x) ((unsigned long)(x))
20+
#define __pa_symbol(x) ((unsigned long)(x))
2021

2122
#define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
2223

tools/testing/memblock/internal.h

Lines changed: 31 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,22 @@ static int memblock_debug = 1;
1111

1212
#define pr_warn_ratelimited(fmt, ...) printf(fmt, ##__VA_ARGS__)
1313

14+
#define K(x) ((x) << (PAGE_SHIFT-10))
15+
1416
bool mirrored_kernelcore = false;
1517

1618
struct page {};
19+
/*
 * Userspace test-harness stubs: there is no struct page array and no
 * direct map here, so any call into these is a hard error.
 */
static inline void *page_address(struct page *pg)
{
	BUG();		/* must never be reached in the harness */
	return pg;
}

static inline struct page *virt_to_page(void *addr)
{
	BUG();		/* must never be reached in the harness */
	return addr;
}
1730

1831
void memblock_free_pages(unsigned long pfn, unsigned int order)
1932
{
@@ -23,10 +36,25 @@ static inline void accept_memory(phys_addr_t start, unsigned long size)
2336
{
2437
}
2538

26-
static inline unsigned long free_reserved_area(void *start, void *end,
27-
int poison, const char *s)
39+
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s);
40+
void free_reserved_page(struct page *page);
41+
42+
/* The userspace memblock harness never defers struct page initialization. */
static inline bool deferred_pages_enabled(void)
{
	return false;
}
46+
47+
#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
48+
for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
49+
50+
/* Identity stub: the userspace harness has no KASAN pointer tags to strip. */
static inline void *kasan_reset_tag(const void *addr)
{
	void *untagged = (void *)addr;

	return untagged;
}
54+
55+
/* No kernel image exists in the userspace harness, so no address is one. */
static inline bool __is_kernel(unsigned long addr)
{
	(void)addr;	/* unused in the stub */
	return false;
}
3159

3260
#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \

0 commit comments

Comments
 (0)