zsmalloc: rework zspage chain size selection
Patch series "zsmalloc: make zspage chain size configurable".

Computers are bad at division. We currently decide the best zspage chain
size (max number of physical pages per zspage) by looking at a `used
percentage` value. This is not precise enough, as we lose precision
during usage percentage calculations.

For example, let's look at size class 208:

  pages per zspage    wasted bytes    used%
         1                144           96
         2                 80           99
         3                 16           99
         4                160           99

The current algorithm selects the 2 pages per zspage configuration, as
it's the first one to reach 99%. However, the 3 pages per zspage
configuration wastes less memory.

Change the algorithm to select the zspage configuration with the lowest
wasted-bytes value.

Link: https://lkml.kernel.org/r/20230118005210.2814763-1-senozhatsky@chromium.org
Link: https://lkml.kernel.org/r/20230118005210.2814763-2-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 076cf7ea67
commit 6260ae3583
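The precision loss described in the changelog is easy to reproduce in
userspace. Below is a minimal sketch (not kernel code; the PAGE_SIZE of
4096 and the chain-size cap of 4 are assumptions for the demo) that
recomputes the table above for size class 208, using the same integer
arithmetic as the old heuristic:

#include <stdio.h>

#define PAGE_SIZE		4096	/* assumed: typical 4K pages */
#define ZS_MAX_PAGES_PER_ZSPAGE	4	/* assumed cap for this demo */

int main(void)
{
	int class_size = 208;
	int i;

	printf("pages per zspage\twasted bytes\tused%%\n");
	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size = i * PAGE_SIZE;
		int waste = zspage_size % class_size;
		/*
		 * Integer division drops the fractional part, so
		 * 99.02% (2 pages) and 99.87% (3 pages) both collapse
		 * to the same value: 99.
		 */
		int usedpc = (zspage_size - waste) * 100 / zspage_size;

		printf("%16d\t%12d\t%5d\n", i, waste, usedpc);
	}
	return 0;
}

The comment marks the exact spot where precision is lost: once two
configurations round to the same used%, the first one found wins, even
when a later one wastes less memory.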
mm/zsmalloc.c

@@ -822,42 +822,6 @@ static enum fullness_group fix_fullness_group(struct size_class *class,
 	return newfg;
 }
 
-/*
- * We have to decide on how many pages to link together
- * to form a zspage for each size class. This is important
- * to reduce wastage due to unusable space left at end of
- * each zspage which is given as:
- *     wastage = Zp % class_size
- *     usage = Zp - wastage
- * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
- *
- * For example, for size class of 3/8 * PAGE_SIZE, we should
- * link together 3 PAGE_SIZE sized pages to form a zspage
- * since then we can perfectly fit in 8 such objects.
- */
-static int get_pages_per_zspage(int class_size)
-{
-	int i, max_usedpc = 0;
-	/* zspage order which gives maximum used size per KB */
-	int max_usedpc_order = 1;
-
-	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
-		int zspage_size;
-		int waste, usedpc;
-
-		zspage_size = i * PAGE_SIZE;
-		waste = zspage_size % class_size;
-		usedpc = (zspage_size - waste) * 100 / zspage_size;
-
-		if (usedpc > max_usedpc) {
-			max_usedpc = usedpc;
-			max_usedpc_order = i;
-		}
-	}
-
-	return max_usedpc_order;
-}
-
 static struct zspage *get_zspage(struct page *page)
 {
 	struct zspage *zspage = (struct zspage *)page_private(page);
@@ -2401,6 +2365,24 @@ static int zs_register_shrinker(struct zs_pool *pool)
 					 pool->name);
 }
 
+static int calculate_zspage_chain_size(int class_size)
+{
+	int i, min_waste = INT_MAX;
+	int chain_size = 1;
+
+	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
+		int waste;
+
+		waste = (i * PAGE_SIZE) % class_size;
+		if (waste < min_waste) {
+			min_waste = waste;
+			chain_size = i;
+		}
+	}
+
+	return chain_size;
+}
+
 /**
  * zs_create_pool - Creates an allocation pool to work from.
  * @name: pool name to be created
@@ -2445,7 +2427,7 @@ struct zs_pool *zs_create_pool(const char *name)
 		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
 		if (size > ZS_MAX_ALLOC_SIZE)
 			size = ZS_MAX_ALLOC_SIZE;
-		pages_per_zspage = get_pages_per_zspage(size);
+		pages_per_zspage = calculate_zspage_chain_size(size);
 		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
 
 		/*
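For a quick sanity check of the new selection logic, the sketch below
(userspace, not kernel code; the PAGE_SIZE and ZS_MAX_PAGES_PER_ZSPAGE
values are assumptions for the demo, and the class sizes other than 208
are arbitrary illustrations) mirrors calculate_zspage_chain_size() and
derives objs_per_zspage the same way zs_create_pool() does above. For
size class 208 it picks 3 pages per zspage (59 objects, 16 bytes
wasted), where the old used% heuristic picked 2 (39 objects, 80 bytes
wasted):

#include <stdio.h>
#include <limits.h>

#define PAGE_SIZE		4096	/* assumed: typical 4K pages */
#define ZS_MAX_PAGES_PER_ZSPAGE	4	/* assumed cap for this demo */

/* Mirrors the new kernel helper added above. */
static int calculate_zspage_chain_size(int class_size)
{
	int i, min_waste = INT_MAX;
	int chain_size = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int waste = (i * PAGE_SIZE) % class_size;

		if (waste < min_waste) {
			min_waste = waste;
			chain_size = i;
		}
	}
	return chain_size;
}

int main(void)
{
	/* 208 is the class from the changelog; the rest are arbitrary. */
	int sizes[] = { 208, 256, 1024, 3264 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int size = sizes[i];
		int pages = calculate_zspage_chain_size(size);

		/* Same derivation as zs_create_pool() above. */
		printf("class %4d: %d pages/zspage, %d objs/zspage, %d bytes wasted\n",
		       size, pages, pages * PAGE_SIZE / size,
		       (pages * PAGE_SIZE) % size);
	}
	return 0;
}

Note the design trade-off: minimizing waste favors longer chains, so
the ZS_MAX_PAGES_PER_ZSPAGE cap is what keeps the result bounded; the
rest of the series makes that cap configurable.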