Skip to content

Commit 1daa298

Browse files
torvalds authored and gregkh committed
Revert "mm: fix initialization of struct page for holes in memory layout"
commit 377bf66 upstream. This reverts commit d3921cb. Chris Wilson reports that it causes boot problems: "We have half a dozen or so different machines in CI that are silently failing to boot, that we believe is bisected to this patch" and the CI team confirmed that a revert fixed the issues. The cause is unknown for now, so let's revert it. Link: https://lore.kernel.org/lkml/161160687463.28991.354987542182281928@build.alporthouse.com/ Reported-and-tested-by: Chris Wilson <chris@chris-wilson.co.uk> Acked-by: Mike Rapoport <rppt@linux.ibm.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent f2a7985 commit 1daa298

1 file changed

Lines changed: 34 additions & 50 deletions

File tree

mm/page_alloc.c

Lines changed: 34 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -7003,26 +7003,23 @@ void __init free_area_init_memoryless_node(int nid)
70037003
* Initialize all valid struct pages in the range [spfn, epfn) and mark them
70047004
* PageReserved(). Return the number of struct pages that were initialized.
70057005
*/
7006-
static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
7007-
int zone, int nid)
7006+
static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
70087007
{
7009-
unsigned long pfn, zone_spfn, zone_epfn;
7008+
unsigned long pfn;
70107009
u64 pgcnt = 0;
70117010

7012-
zone_spfn = arch_zone_lowest_possible_pfn[zone];
7013-
zone_epfn = arch_zone_highest_possible_pfn[zone];
7014-
7015-
spfn = clamp(spfn, zone_spfn, zone_epfn);
7016-
epfn = clamp(epfn, zone_spfn, zone_epfn);
7017-
70187011
for (pfn = spfn; pfn < epfn; pfn++) {
70197012
if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
70207013
pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
70217014
+ pageblock_nr_pages - 1;
70227015
continue;
70237016
}
7024-
7025-
__init_single_page(pfn_to_page(pfn), pfn, zone, nid);
7017+
/*
7018+
* Use a fake node/zone (0) for now. Some of these pages
7019+
* (in memblock.reserved but not in memblock.memory) will
7020+
* get re-initialized via reserve_bootmem_region() later.
7021+
*/
7022+
__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
70267023
__SetPageReserved(pfn_to_page(pfn));
70277024
pgcnt++;
70287025
}
@@ -7031,64 +7028,51 @@ static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
70317028
}
70327029

70337030
/*
7034-
* Only struct pages that correspond to ranges defined by memblock.memory
7035-
* are zeroed and initialized by going through __init_single_page() during
7036-
* memmap_init().
7037-
*
7038-
* But, there could be struct pages that correspond to holes in
7039-
* memblock.memory. This can happen because of the following reasons:
7040-
* - physical memory bank size is not necessarily the exact multiple of the
7041-
* arbitrary section size
7042-
* - early reserved memory may not be listed in memblock.memory
7043-
* - memory layouts defined with memmap= kernel parameter may not align
7044-
* nicely with memmap sections
7031+
* Only struct pages that are backed by physical memory are zeroed and
7032+
* initialized by going through __init_single_page(). But, there are some
7033+
* struct pages which are reserved in memblock allocator and their fields
7034+
* may be accessed (for example page_to_pfn() on some configuration accesses
7035+
* flags). We must explicitly initialize those struct pages.
70457036
*
7046-
* Explicitly initialize those struct pages so that:
7047-
* - PG_Reserved is set
7048-
* - zone link is set according to the architecture constraints
7049-
* - node is set to node id of the next populated region except for the
7050-
* trailing hole where last node id is used
7037+
* This function also addresses a similar issue where struct pages are left
7038+
* uninitialized because the physical address range is not covered by
7039+
* memblock.memory or memblock.reserved. That could happen when memblock
7040+
* layout is manually configured via memmap=, or when the highest physical
7041+
* address (max_pfn) does not end on a section boundary.
70517042
*/
7052-
static void __init init_zone_unavailable_mem(int zone)
7043+
static void __init init_unavailable_mem(void)
70537044
{
7054-
unsigned long start, end;
7055-
int i, nid;
7056-
u64 pgcnt;
7057-
unsigned long next = 0;
7045+
phys_addr_t start, end;
7046+
u64 i, pgcnt;
7047+
phys_addr_t next = 0;
70587048

70597049
/*
7060-
* Loop through holes in memblock.memory and initialize struct
7061-
* pages corresponding to these holes
7050+
* Loop through unavailable ranges not covered by memblock.memory.
70627051
*/
70637052
pgcnt = 0;
7064-
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7053+
for_each_mem_range(i, &start, &end) {
70657054
if (next < start)
7066-
pgcnt += init_unavailable_range(next, start, zone, nid);
7055+
pgcnt += init_unavailable_range(PFN_DOWN(next),
7056+
PFN_UP(start));
70677057
next = end;
70687058
}
70697059

70707060
/*
7071-
* Last section may surpass the actual end of memory (e.g. we can
7072-
* have 1Gb section and 512Mb of RAM populated).
7073-
* Make sure that memmap has a well defined state in this case.
7061+
* Early sections always have a fully populated memmap for the whole
7062+
* section - see pfn_valid(). If the last section has holes at the
7063+
* end and that section is marked "online", the memmap will be
7064+
* considered initialized. Make sure that memmap has a well defined
7065+
* state.
70747066
*/
7075-
end = round_up(max_pfn, PAGES_PER_SECTION);
7076-
pgcnt += init_unavailable_range(next, end, zone, nid);
7067+
pgcnt += init_unavailable_range(PFN_DOWN(next),
7068+
round_up(max_pfn, PAGES_PER_SECTION));
70777069

70787070
/*
70797071
* Struct pages that do not have backing memory. This could be because
70807072
* firmware is using some of this memory, or for some other reasons.
70817073
*/
70827074
if (pgcnt)
7083-
pr_info("Zone %s: zeroed struct page in unavailable ranges: %lld pages", zone_names[zone], pgcnt);
7084-
}
7085-
7086-
static void __init init_unavailable_mem(void)
7087-
{
7088-
int zone;
7089-
7090-
for (zone = 0; zone < ZONE_MOVABLE; zone++)
7091-
init_zone_unavailable_mem(zone);
7075+
pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
70927076
}
70937077
#else
70947078
static inline void __init init_unavailable_mem(void)

0 commit comments

Comments
 (0)