mm/mm_init.c: remove meaningless calculation of zone->managed_pages in free_area_init_core()
Currently, in free_area_init_core(), when initializing zone fields, a rough value is set to zone->managed_pages. That value is calculated as (zone->present_pages - memmap_pages). Meanwhile, the same value is added to nr_all_pages and nr_kernel_pages, which represent all free pages of the system (nr_all_pages including HIGHMEM memory, nr_kernel_pages low memory only). Both of them are later used in alloc_large_system_hash().

However, the rough calculation and setting of zone->managed_pages is meaningless because

a) memmap pages are allocated in units of node in sparse_init() or alloc_node_mem_map(pgdat); the simple (zone->present_pages - memmap_pages) is too rough to make sense for a zone;

b) the zone->managed_pages set here will be zeroed out and reset with the actual value in mem_init() via memblock_free_all(). Before the resetting, no buddy allocation request is issued.

Here, remove the meaningless and complicated calculation of (zone->present_pages - memmap_pages) and directly set zone->managed_pages to zone->present_pages for now; it will be adjusted in mem_init(). Also remove the assignment of nr_all_pages and nr_kernel_pages in free_area_init_core(). Instead, call the newly added calc_nr_kernel_pages() to count up all free but not reserved memory in memblock and assign it to nr_all_pages and nr_kernel_pages. The counting excludes memmap_pages and other kernel-used data, which is more accurate and simpler than the old way, and also covers the arch_reserved_kernel_pages() case required by ppc.

Also clean up the outdated code comment above free_area_init_core(). The function is easy to understand now; no extra words are needed to explain it.

[bhe@redhat.com: initialize zone->managed_pages as zone->present_pages for now]
Link: https://lkml.kernel.org/r/ZgU0bsJ2FEjykvju@MiWiFi-R3L-srv
Link: https://lkml.kernel.org/r/20240325145646.1044760-5-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
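The calc_nr_kernel_pages() referenced above was added earlier in this patch series. A simplified sketch of its logic (not the verbatim upstream code) is shown below: it walks memblock's free ranges only, so memblock-reserved memory such as the memmap and other early kernel allocations is excluded automatically.

/*
 * Simplified sketch of calc_nr_kernel_pages(): count free, non-reserved
 * memblock memory in units of pages. All free pages feed nr_all_pages;
 * only pages below the highmem boundary feed nr_kernel_pages.
 */
static void __init calc_nr_kernel_pages(void)
{
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	u64 u;
#ifdef CONFIG_HIGHMEM
	unsigned long high_zone_low = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
#endif

	/* Iterate ranges that are memblock memory and not reserved */
	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start_addr, &end_addr, NULL) {
		start_pfn = PFN_UP(start_addr);
		end_pfn = PFN_DOWN(end_addr);

		if (start_pfn < end_pfn) {
			nr_all_pages += end_pfn - start_pfn;
#ifdef CONFIG_HIGHMEM
			/* Clamp to lowmem so nr_kernel_pages excludes HIGHMEM */
			start_pfn = clamp(start_pfn, 0UL, high_zone_low);
			end_pfn = clamp(end_pfn, 0UL, high_zone_low);
#endif
			nr_kernel_pages += end_pfn - start_pfn;
		}
	}
}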
This commit is contained in:
parent 8ad4184985
commit 0ac5e785dc

mm/mm_init.c | 46
@@ -1566,15 +1566,6 @@ void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
 }
 #endif
 
-/*
- * Set up the zone data structures:
- *   - mark all pages reserved
- *   - mark all memory queues empty
- *   - clear the memory bitmaps
- *
- * NOTE: pgdat should get zeroed by caller.
- * NOTE: this function is only called during early init.
- */
 static void __init free_area_init_core(struct pglist_data *pgdat)
 {
 	enum zone_type j;
@@ -1585,41 +1576,13 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
 
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
-		unsigned long size, freesize, memmap_pages;
-
-		size = zone->spanned_pages;
-		freesize = zone->present_pages;
+		unsigned long size = zone->spanned_pages;
 
 		/*
-		 * Adjust freesize so that it accounts for how much memory
-		 * is used by this zone for memmap. This affects the watermark
-		 * and per-cpu initialisations
+		 * Initialize zone->managed_pages as 0 , it will be reset
+		 * when memblock allocator frees pages into buddy system.
 		 */
-		memmap_pages = calc_memmap_size(size, freesize);
-		if (!is_highmem_idx(j)) {
-			if (freesize >= memmap_pages) {
-				freesize -= memmap_pages;
-				if (memmap_pages)
-					pr_debug("  %s zone: %lu pages used for memmap\n",
-						 zone_names[j], memmap_pages);
-			} else
-				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
-					zone_names[j], memmap_pages, freesize);
-		}
-
-		if (!is_highmem_idx(j))
-			nr_kernel_pages += freesize;
-		/* Charge for highmem memmap if there are enough kernel pages */
-		else if (nr_kernel_pages > memmap_pages * 2)
-			nr_kernel_pages -= memmap_pages;
-		nr_all_pages += freesize;
-
-		/*
-		 * Set an approximate value for lowmem here, it will be adjusted
-		 * when the bootmem allocator frees pages into the buddy system.
-		 * And all highmem pages will be managed by the buddy system.
-		 */
-		zone_init_internals(zone, j, nid, freesize);
+		zone_init_internals(zone, j, nid, zone->present_pages);
 
 		if (!size)
 			continue;
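For context, zone_init_internals() (not touched by this patch) is where the last argument lands in zone->managed_pages, so passing zone->present_pages just seeds managed_pages with a placeholder until mem_init() resets it. Abridged from mm/mm_init.c, showing only the relevant part:

static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx,
					  int nid, unsigned long remaining_pages)
{
	/* Seed managed_pages; the real value is set when pages are freed
	 * into the buddy allocator during mem_init(). */
	atomic_long_set(&zone->managed_pages, remaining_pages);
	zone->name = zone_names[idx];
	zone->zone_pgdat = NODE_DATA(nid);
	/* ... lock and per-cpu pageset initialisation elided ... */
}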
@@ -1916,6 +1879,7 @@ void __init free_area_init(unsigned long *max_zone_pfn)
 		check_for_memory(pgdat);
 	}
 
+	calc_nr_kernel_pages();
 	memmap_init();
 
 	/* disable hash distribution for systems with a single node */
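Point (b) of the commit message rests on the fact that mem_init() ends up in memblock_free_all(), which zeroes every zone's managed_pages before releasing memory to the buddy allocator; managed_pages is then incremented as pages are actually freed. A simplified sketch of that flow, abridged from mm/memblock.c:

/* Simplified flow of memblock_free_all(), as reached from mem_init() */
void __init memblock_free_all(void)
{
	unsigned long pages;

	/*
	 * Zero zone->managed_pages for all zones: whatever value
	 * free_area_init_core() stored there is discarded here, which
	 * is why a precise calculation earlier is pointless.
	 */
	reset_all_zones_managed_pages();

	/*
	 * Release free memblock ranges to the buddy allocator; each
	 * freed page bumps its zone's managed_pages to the real value.
	 */
	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}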