
Commit f2a7985

rppt authored and gregkh committed
mm: fix initialization of struct page for holes in memory layout
commit d3921cb upstream.

There could be struct pages that are not backed by actual physical
memory. This can happen when the actual memory bank is not a multiple of
SECTION_SIZE or when an architecture does not register memory holes
reserved by the firmware as memblock.memory.

Such pages are currently initialized using the init_unavailable_mem()
function, which iterates through PFNs in holes in memblock.memory and, if
there is a struct page corresponding to a PFN, sets the fields of this
page to default values and marks the page as Reserved.

init_unavailable_mem() does not take into account the zone and node the
page belongs to, and sets both the zone and node links in struct page to
zero.

On a system that has firmware-reserved holes in a zone above ZONE_DMA,
for instance in a configuration below:

	# grep -A1 E820 /proc/iomem
	7a17b000-7a216fff : Unknown E820 type
	7a217000-7bffffff : System RAM

the unset zone link in struct page will trigger

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

because there are pages in both ZONE_DMA32 and ZONE_DMA (the latter due
to the unset zone link in struct page) in the same pageblock.

Update init_unavailable_mem() to use the zone constraints defined by an
architecture to properly set up the zone link, and use the node ID of the
adjacent range in memblock.memory to set the node link.

Link: https://lkml.kernel.org/r/20210111194017.22696-3-rppt@kernel.org
Fixes: 73a6e47 ("mm: memmap_init: iterate over memblock regions rather that check each PFN")
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reported-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qian Cai <cai@lca.pw>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 5405cb3 commit f2a7985
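
To make the fix concrete, here is a minimal userspace sketch (not kernel
code) of the per-zone clamping this patch adds to init_unavailable_range():
a hole's PFN range is intersected with each zone's possible PFN span, so
every page in the hole is linked to the zone that actually covers it. The
zone table (zone_lo/zone_hi) and clamp_ul() are illustrative stand-ins for
the kernel's arch_zone_lowest_possible_pfn[] /
arch_zone_highest_possible_pfn[] and clamp(); the hole PFNs come from the
E820 example in the commit message.

#include <stdio.h>

/* Illustrative x86-like zone PFN limits (4KiB pages); real values come
 * from arch_zone_lowest_possible_pfn[]/arch_zone_highest_possible_pfn[]. */
enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, NR_ZONES };
static const char *zone_names[NR_ZONES] = { "DMA", "DMA32", "Normal" };
static unsigned long zone_lo[NR_ZONES] = { 0x1,    0x1000,   0x100000 };
static unsigned long zone_hi[NR_ZONES] = { 0x1000, 0x100000, 0x440000 };

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Firmware-reserved hole from the commit message:
	 * 7a17b000-7a216fff => PFNs [0x7a17b, 0x7a217) with 4KiB pages. */
	unsigned long hole_spfn = 0x7a17b, hole_epfn = 0x7a217;

	for (int zone = 0; zone < NR_ZONES; zone++) {
		unsigned long spfn = clamp_ul(hole_spfn, zone_lo[zone], zone_hi[zone]);
		unsigned long epfn = clamp_ul(hole_epfn, zone_lo[zone], zone_hi[zone]);

		if (spfn < epfn)
			printf("zone %-6s: init PFNs [%#lx, %#lx)\n",
			       zone_names[zone], spfn, epfn);
		else
			printf("zone %-6s: hole does not intersect\n",
			       zone_names[zone]);
	}
	return 0;
}

With these illustrative boundaries, only ZONE_DMA32 intersects the hole,
matching the commit's observation that the affected pages sit in
ZONE_DMA32 yet previously got a zero (ZONE_DMA) zone link.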

1 file changed

mm/page_alloc.c: 50 additions & 34 deletions
@@ -7003,23 +7003,26 @@ void __init free_area_init_memoryless_node(int nid)
  * Initialize all valid struct pages in the range [spfn, epfn) and mark them
  * PageReserved(). Return the number of struct pages that were initialized.
  */
-static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
+					 int zone, int nid)
 {
-	unsigned long pfn;
+	unsigned long pfn, zone_spfn, zone_epfn;
 	u64 pgcnt = 0;
 
+	zone_spfn = arch_zone_lowest_possible_pfn[zone];
+	zone_epfn = arch_zone_highest_possible_pfn[zone];
+
+	spfn = clamp(spfn, zone_spfn, zone_epfn);
+	epfn = clamp(epfn, zone_spfn, zone_epfn);
+
 	for (pfn = spfn; pfn < epfn; pfn++) {
 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
 				+ pageblock_nr_pages - 1;
 			continue;
 		}
-		/*
-		 * Use a fake node/zone (0) for now. Some of these pages
-		 * (in memblock.reserved but not in memblock.memory) will
-		 * get re-initialized via reserve_bootmem_region() later.
-		 */
-		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+
+		__init_single_page(pfn_to_page(pfn), pfn, zone, nid);
 		__SetPageReserved(pfn_to_page(pfn));
 		pgcnt++;
 	}
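
A side note on the loop kept in the hunk above: when the first PFN of a
pageblock has no memmap, init_unavailable_range() fast-forwards to the last
PFN of that pageblock so the loop increment lands on the next pageblock.
Below is a self-contained sketch of that skip pattern; fake_pfn_valid() and
the pageblock size are hypothetical stand-ins for pfn_valid() and
pageblock_nr_pages.

#include <stdio.h>

/* Stand-in for pageblock_nr_pages; a power of two on real kernels. */
#define PAGEBLOCK_NR_PAGES 512UL
#define ALIGN_DOWN_UL(x, a) ((x) & ~((a) - 1))

/* Hypothetical validity check standing in for pfn_valid(). */
static int fake_pfn_valid(unsigned long pfn)
{
	return pfn >= 1024; /* pretend the first two pageblocks have no memmap */
}

int main(void)
{
	unsigned long pfn, visited = 0;

	for (pfn = 100; pfn < 2048; pfn++) {
		/* If the pageblock's first PFN has no memmap, jump to the
		 * last PFN of that pageblock; the loop's pfn++ then moves
		 * to the start of the next pageblock. */
		if (!fake_pfn_valid(ALIGN_DOWN_UL(pfn, PAGEBLOCK_NR_PAGES))) {
			pfn = ALIGN_DOWN_UL(pfn, PAGEBLOCK_NR_PAGES)
				+ PAGEBLOCK_NR_PAGES - 1;
			continue;
		}
		visited++;
	}
	printf("initialized %lu of %lu candidate PFNs\n", visited, 2048UL - 100);
	return 0;
}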
@@ -7028,51 +7031,64 @@ static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
 }
 
 /*
- * Only struct pages that are backed by physical memory are zeroed and
- * initialized by going through __init_single_page(). But, there are some
- * struct pages which are reserved in memblock allocator and their fields
- * may be accessed (for example page_to_pfn() on some configuration accesses
- * flags). We must explicitly initialize those struct pages.
+ * Only struct pages that correspond to ranges defined by memblock.memory
+ * are zeroed and initialized by going through __init_single_page() during
+ * memmap_init().
+ *
+ * But, there could be struct pages that correspond to holes in
+ * memblock.memory. This can happen because of the following reasons:
+ *  - physical memory bank size is not necessarily the exact multiple of the
+ *    arbitrary section size
+ *  - early reserved memory may not be listed in memblock.memory
+ *  - memory layouts defined with memmap= kernel parameter may not align
+ *    nicely with memmap sections
  *
- * This function also addresses a similar issue where struct pages are left
- * uninitialized because the physical address range is not covered by
- * memblock.memory or memblock.reserved. That could happen when memblock
- * layout is manually configured via memmap=, or when the highest physical
- * address (max_pfn) does not end on a section boundary.
+ * Explicitly initialize those struct pages so that:
+ *  - PG_Reserved is set
+ *  - zone link is set according to the architecture constraints
+ *  - node is set to the node id of the next populated region except for the
+ *    trailing hole where the last node id is used
 */
-static void __init init_unavailable_mem(void)
+static void __init init_zone_unavailable_mem(int zone)
 {
-	phys_addr_t start, end;
-	u64 i, pgcnt;
-	phys_addr_t next = 0;
+	unsigned long start, end;
+	int i, nid;
+	u64 pgcnt;
+	unsigned long next = 0;
 
 	/*
-	 * Loop through unavailable ranges not covered by memblock.memory.
+	 * Loop through holes in memblock.memory and initialize struct
+	 * pages corresponding to these holes
	 */
 	pgcnt = 0;
-	for_each_mem_range(i, &start, &end) {
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
 		if (next < start)
-			pgcnt += init_unavailable_range(PFN_DOWN(next),
-							PFN_UP(start));
+			pgcnt += init_unavailable_range(next, start, zone, nid);
 		next = end;
 	}
 
 	/*
-	 * Early sections always have a fully populated memmap for the whole
-	 * section - see pfn_valid(). If the last section has holes at the
-	 * end and that section is marked "online", the memmap will be
-	 * considered initialized. Make sure that memmap has a well defined
-	 * state.
+	 * Last section may surpass the actual end of memory (e.g. we can
+	 * have 1Gb section and 512Mb of RAM populated).
+	 * Make sure that memmap has a well defined state in this case.
	 */
-	pgcnt += init_unavailable_range(PFN_DOWN(next),
-					round_up(max_pfn, PAGES_PER_SECTION));
+	end = round_up(max_pfn, PAGES_PER_SECTION);
+	pgcnt += init_unavailable_range(next, end, zone, nid);
 
 	/*
 	 * Struct pages that do not have backing memory. This could be because
 	 * firmware is using some of this memory, or for some other reasons.
 	 */
 	if (pgcnt)
-		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
+		pr_info("Zone %s: zeroed struct page in unavailable ranges: %lld pages", zone_names[zone], pgcnt);
+}
+
+static void __init init_unavailable_mem(void)
+{
+	int zone;
+
+	for (zone = 0; zone < ZONE_MOVABLE; zone++)
+		init_zone_unavailable_mem(zone);
 }
 #else
 static inline void __init init_unavailable_mem(void)
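
The node-link rule from the new comment above (a hole takes the node id of
the next populated region; the trailing hole reuses the last node id) can
be traced with a small sketch. The memory[] layout and max_pfn value below
are hypothetical, and the plain loop stands in for the kernel's
for_each_mem_pfn_range().

#include <stdio.h>

/* Hypothetical memblock.memory layout: PFN ranges with node ids. */
struct mem_range { unsigned long start, end; int nid; };
static struct mem_range memory[] = {
	{ 0x001, 0x100, 0 },	/* node 0 */
	{ 0x180, 0x300, 0 },	/* hole [0x100, 0x180) precedes this range */
	{ 0x400, 0x600, 1 },	/* hole [0x300, 0x400) precedes node 1 */
};
#define NR_RANGES (sizeof(memory) / sizeof(memory[0]))

int main(void)
{
	unsigned long max_pfn = 0x700; /* pretend the memmap extends to here */
	unsigned long next = 0;
	int nid = 0;

	/* A hole between ranges takes the node id of the *next* populated
	 * range, mirroring the loop in init_zone_unavailable_mem(). */
	for (unsigned i = 0; i < NR_RANGES; i++) {
		nid = memory[i].nid;
		if (next < memory[i].start)
			printf("hole [%#lx, %#lx) -> node %d\n",
			       next, memory[i].start, nid);
		next = memory[i].end;
	}
	/* The trailing hole reuses the last seen node id. */
	if (next < max_pfn)
		printf("trailing hole [%#lx, %#lx) -> node %d\n",
		       next, max_pfn, nid);
	return 0;
}

Here the hole at [0x300, 0x400) lands on node 1 because the next populated
range is on node 1, and the trailing hole after 0x600 also stays on node 1.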
