mm: compaction: early termination in compact_nodes()

There is no need to continue trying to compact memory if a fatal signal is
pending; allow the loop in compact_nodes() to terminate earlier.

The existing fatal_signal_pending() check does make compact_zone()
break out of the while loop, but it still enters the next zone/next
nid, and some unnecessary functions (e.g., lru_add_drain) are called.
There was no observable benefit from the new test; it was just found
from code inspection when refactoring compact_node().

Link: https://lkml.kernel.org/r/20240208022508.1771534-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Kefeng Wang 2024-02-08 10:25:08 +08:00 committed by Andrew Morton
parent 55e78c933d
commit f6f3f27597

View File

@ -2808,7 +2808,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
* reaching score targets due to various back-off conditions, such as, * reaching score targets due to various back-off conditions, such as,
* contention on per-node or per-zone locks. * contention on per-node or per-zone locks.
*/ */
static void compact_node(pg_data_t *pgdat, bool proactive) static int compact_node(pg_data_t *pgdat, bool proactive)
{ {
int zoneid; int zoneid;
struct zone *zone; struct zone *zone;
@ -2826,6 +2826,9 @@ static void compact_node(pg_data_t *pgdat, bool proactive)
if (!populated_zone(zone)) if (!populated_zone(zone))
continue; continue;
if (fatal_signal_pending(current))
return -EINTR;
cc.zone = zone; cc.zone = zone;
compact_zone(&cc, NULL); compact_zone(&cc, NULL);
@ -2837,18 +2840,25 @@ static void compact_node(pg_data_t *pgdat, bool proactive)
cc.total_free_scanned); cc.total_free_scanned);
} }
} }
return 0;
} }
/* Compact all zones of all nodes in the system */ /* Compact all zones of all nodes in the system */
static void compact_nodes(void) static int compact_nodes(void)
{ {
int nid; int ret, nid;
/* Flush pending updates to the LRU lists */ /* Flush pending updates to the LRU lists */
lru_add_drain_all(); lru_add_drain_all();
for_each_online_node(nid) for_each_online_node(nid) {
compact_node(NODE_DATA(nid), false); ret = compact_node(NODE_DATA(nid), false);
if (ret)
return ret;
}
return 0;
} }
static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
@ -2894,9 +2904,9 @@ static int sysctl_compaction_handler(struct ctl_table *table, int write,
return -EINVAL; return -EINVAL;
if (write) if (write)
compact_nodes(); ret = compact_nodes();
return 0; return ret;
} }
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)