Skip to content

Commit 4a65429

Browse files
gerald-schaefer authored and Martin Schwidefsky committed
s390/mm: fix zone calculation in arch_add_memory()
Standby (hotplug) memory should be added to ZONE_MOVABLE on s390. After
commit 199071f "s390/mm: make arch_add_memory() NUMA aware",
arch_add_memory() used memblock_end_of_DRAM() to find out the end of
ZONE_NORMAL and the beginning of ZONE_MOVABLE. However, commit 7f36e3e
"memory-hotplug: add hot-added memory ranges to memblock before allocate
node_data for a node." moved the call of memblock_add_node() before the
call of arch_add_memory() in add_memory_resource(), and thus changed the
return value of memblock_end_of_DRAM() when called in arch_add_memory().
As a result, arch_add_memory() will think that all memory blocks should
be added to ZONE_NORMAL.

Fix this by changing the logic in arch_add_memory() so that it will
manually iterate over all zones of a given node to find out which zone a
memory block should be added to.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
1 parent 47ece7f commit 4a65429

1 file changed

Lines changed: 21 additions & 17 deletions

File tree

arch/s390/mm/init.c

Lines changed: 21 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
151151
#ifdef CONFIG_MEMORY_HOTPLUG
152152
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
153153
{
154-
unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
155-
unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
154+
unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
156155
unsigned long start_pfn = PFN_DOWN(start);
157156
unsigned long size_pages = PFN_DOWN(size);
158-
unsigned long nr_pages;
159-
int rc, zone_enum;
157+
pg_data_t *pgdat = NODE_DATA(nid);
158+
struct zone *zone;
159+
int rc, i;
160160

161161
rc = vmem_add_mapping(start, size);
162162
if (rc)
163163
return rc;
164164

165-
while (size_pages > 0) {
166-
if (start_pfn < dma_end_pfn) {
167-
nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
168-
dma_end_pfn - start_pfn : size_pages;
169-
zone_enum = ZONE_DMA;
170-
} else if (start_pfn < normal_end_pfn) {
171-
nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
172-
normal_end_pfn - start_pfn : size_pages;
173-
zone_enum = ZONE_NORMAL;
165+
for (i = 0; i < MAX_NR_ZONES; i++) {
166+
zone = pgdat->node_zones + i;
167+
if (zone_idx(zone) != ZONE_MOVABLE) {
168+
/* Add range within existing zone limits, if possible */
169+
zone_start_pfn = zone->zone_start_pfn;
170+
zone_end_pfn = zone->zone_start_pfn +
171+
zone->spanned_pages;
174172
} else {
175-
nr_pages = size_pages;
176-
zone_enum = ZONE_MOVABLE;
173+
/* Add remaining range to ZONE_MOVABLE */
174+
zone_start_pfn = start_pfn;
175+
zone_end_pfn = start_pfn + size_pages;
177176
}
178-
rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
179-
start_pfn, size_pages);
177+
if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
178+
continue;
179+
nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
180+
zone_end_pfn - start_pfn : size_pages;
181+
rc = __add_pages(nid, zone, start_pfn, nr_pages);
180182
if (rc)
181183
break;
182184
start_pfn += nr_pages;
183185
size_pages -= nr_pages;
186+
if (!size_pages)
187+
break;
184188
}
185189
if (rc)
186190
vmem_remove_mapping(start, size);

0 commit comments

Comments (0)