mm/zsmalloc: use a proper page type
Let's clean it up: use a proper page type and store our data (offset into a page) in the lower 16 bit as documented.

We won't be able to support 256 KiB base pages, which is acceptable. Teach Kconfig to handle that cleanly using a new CONFIG_HAVE_ZSMALLOC.

Based on this, we should do a proper "struct zsdesc" conversion, as proposed in [1].

This removes the last _mapcount/page_type offender.

[1] https://lore.kernel.org/all/20231130101242.2590384-1-42.hyeyoo@gmail.com/

Link: https://lkml.kernel.org/r/20240529111904.2069608-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org> [zram/zsmalloc workloads]
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 43d746dc49
parent 8db00ad564
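Background for the change: page->page_type shares a union with _mapcount, which holds -1 (all bits set) on a page with no mappings. A page type is recorded by clearing one of the high type bits while bit 31 (PAGE_TYPE_BASE) stays set, and an earlier patch in this series reserved the lower 16 bit of the field as an owner-private payload. The following is a minimal user-space model of the round trip this patch performs; it is illustrative, not kernel code. The constants mirror include/linux/page-flags.h, and PageZsmalloc() here is a simplified stand-in for the helper the kernel generates via PAGE_TYPE_OPS:

/* Illustrative user-space model of the page_type encoding; not kernel code. */
#include <assert.h>
#include <stdio.h>

#define PAGE_TYPE_BASE           0x80000000u
#define PG_zsmalloc              0x01000000u
#define FIRST_OBJ_PAGE_TYPE_MASK 0x0000ffffu

/* A typed page has PAGE_TYPE_BASE set and its type bit cleared. */
#define PageZsmalloc(pt) (((pt) & (PAGE_TYPE_BASE | PG_zsmalloc)) == PAGE_TYPE_BASE)

int main(void)
{
	unsigned int page_type = 0xffffffffu;	/* _mapcount == -1: no type, no mappings */

	page_type &= ~PG_zsmalloc;		/* __SetPageZsmalloc() */
	assert(PageZsmalloc(page_type));

	/* set_first_obj_offset(): read-modify-write of the low 16 bit only. */
	page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
	page_type |= 0x1234u & FIRST_OBJ_PAGE_TYPE_MASK;
	assert(PageZsmalloc(page_type));			   /* type bits untouched */
	assert((page_type & FIRST_OBJ_PAGE_TYPE_MASK) == 0x1234u); /* get_first_obj_offset() */

	/* reset_first_obj_offset() + __ClearPageZsmalloc() == page_mapcount_reset() */
	page_type |= FIRST_OBJ_PAGE_TYPE_MASK;
	page_type |= PG_zsmalloc;
	assert(page_type == 0xffffffffu);	/* back to _mapcount == -1 */
	assert(!PageZsmalloc(page_type));

	puts("page_type round trip OK");
	return 0;
}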
drivers/block/zram/Kconfig
@@ -2,6 +2,7 @@
 config ZRAM
 	tristate "Compressed RAM block device support"
 	depends on BLOCK && SYSFS && MMU
+	depends on HAVE_ZSMALLOC
 	depends on CRYPTO_LZO || CRYPTO_ZSTD || CRYPTO_LZ4 || CRYPTO_LZ4HC || CRYPTO_842
 	select ZSMALLOC
 	help
include/linux/page-flags.h
@@ -947,6 +947,7 @@ enum pagetype {
 	PG_guard	= 0x08000000,
 	PG_hugetlb	= 0x04000000,
 	PG_slab		= 0x02000000,
+	PG_zsmalloc	= 0x01000000,
 
 	PAGE_TYPE_BASE	= 0x80000000,
 
@@ -1071,6 +1072,8 @@ FOLIO_TYPE_OPS(hugetlb, hugetlb)
 FOLIO_TEST_FLAG_FALSE(hugetlb)
 #endif
 
+PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
+
 /**
  * PageHuge - Determine if the page belongs to hugetlbfs
  * @page: The page to test.
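For reference, PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc) expands to the PageZsmalloc(), __SetPageZsmalloc() and __ClearPageZsmalloc() helpers that the mm/zsmalloc.c hunks below rely on.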
mm/Kconfig
@@ -128,7 +128,7 @@ config ZSWAP_COMPRESSOR_DEFAULT
 choice
 	prompt "Default allocator"
 	depends on ZSWAP
-	default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU
+	default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if HAVE_ZSMALLOC
 	default ZSWAP_ZPOOL_DEFAULT_ZBUD
 	help
 	  Selects the default allocator for the compressed cache for
@@ -154,6 +154,7 @@ config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
 
 config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
 	bool "zsmalloc"
+	depends on HAVE_ZSMALLOC
 	select ZSMALLOC
 	help
 	  Use the zsmalloc allocator as the default allocator.
@@ -186,10 +187,15 @@ config Z3FOLD
 	  page. It is a ZBUD derivative so the simplicity and determinism are
 	  still there.
 
+config HAVE_ZSMALLOC
+	def_bool y
+	depends on MMU
+	depends on PAGE_SIZE_LESS_THAN_256KB # we want <= 64 KiB
+
 config ZSMALLOC
 	tristate
 	prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
-	depends on MMU
+	depends on HAVE_ZSMALLOC
 	help
 	  zsmalloc is a slab-based memory allocator designed to store
 	  pages of various compression levels efficiently. It achieves
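A note on the bound: 16 payload bits cover offsets 0 through 65535, so any byte offset within a page of at most 64 KiB fits. Since base page sizes are powers of two and, in practice, the next supported size below 256 KiB is 64 KiB, PAGE_SIZE_LESS_THAN_256KB is the tightest existing Kconfig symbol for "at most 64 KiB", hence the comment on the dependency. The BUILD_BUG_ON(PAGE_SIZE > SZ_64K) in set_first_obj_offset() below enforces the same limit at compile time.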
mm/zsmalloc.c
@@ -20,7 +20,8 @@
  *	page->index: links together all component pages of a zspage
  *		For the huge page, this is always 0, so we use this field
  *		to store handle.
- *	page->page_type: first object offset in a subpage of zspage
+ *	page->page_type: PG_zsmalloc, lower 16 bit locate the first object
+ *		offset in a subpage of a zspage
  *
  * Usage of struct page flags:
  *	PG_private: identifies the first component page
@@ -450,14 +451,28 @@ static inline struct page *get_first_page(struct zspage *zspage)
 	return first_page;
 }
 
+#define FIRST_OBJ_PAGE_TYPE_MASK	0xffff
+
+static inline void reset_first_obj_offset(struct page *page)
+{
+	VM_WARN_ON_ONCE(!PageZsmalloc(page));
+	page->page_type |= FIRST_OBJ_PAGE_TYPE_MASK;
+}
+
 static inline unsigned int get_first_obj_offset(struct page *page)
 {
-	return page->page_type;
+	VM_WARN_ON_ONCE(!PageZsmalloc(page));
+	return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK;
 }
 
 static inline void set_first_obj_offset(struct page *page, unsigned int offset)
 {
-	page->page_type = offset;
+	/* With 16 bit available, we can support offsets into 64 KiB pages. */
+	BUILD_BUG_ON(PAGE_SIZE > SZ_64K);
+	VM_WARN_ON_ONCE(!PageZsmalloc(page));
+	VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
+	page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
+	page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
 }
 
 static inline unsigned int get_freeobj(struct zspage *zspage)
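Note the asymmetry between the helpers: set_first_obj_offset() does a read-modify-write that leaves the type bits in the upper half untouched, while reset_first_obj_offset() returns the low 16 bit to all-ones. Combined with __ClearPageZsmalloc() in reset_page() below, page_type ends up back at 0xffffffff, i.e. a _mapcount of -1, which is exactly the state the dropped page_mapcount_reset() call used to establish.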
@@ -791,8 +806,9 @@ static void reset_page(struct page *page)
 	__ClearPageMovable(page);
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
-	page_mapcount_reset(page);
 	page->index = 0;
+	reset_first_obj_offset(page);
+	__ClearPageZsmalloc(page);
 }
 
 static int trylock_zspage(struct zspage *zspage)
@@ -965,11 +981,13 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 		if (!page) {
 			while (--i >= 0) {
 				dec_zone_page_state(pages[i], NR_ZSPAGES);
+				__ClearPageZsmalloc(pages[i]);
 				__free_page(pages[i]);
 			}
 			cache_free_zspage(pool, zspage);
 			return NULL;
 		}
+		__SetPageZsmalloc(page);
 
 		inc_zone_page_state(page, NR_ZSPAGES);
 		pages[i] = page;
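The error path now clears the type on every page that was already marked before handing it back with __free_page(). Since page_type aliases _mapcount, freeing a page with a type still set would look like a mapcount other than -1 and trip the page allocator's bad-page checks.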
@@ -1754,6 +1772,9 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 
 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
 
+	/* We're committed, tell the world that this is a Zsmalloc page. */
+	__SetPageZsmalloc(newpage);
+
 	/* The page is locked, so this pointer must remain valid */
 	zspage = get_zspage(page);
 	pool = zspage->pool;
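A likely reason for marking newpage this early (an observation, not part of the changelog): the destination page arrives from the migration core without a type set, and the migration path will call set_first_obj_offset() on it while wiring it into the zspage, so the new VM_WARN_ON_ONCE(!PageZsmalloc(page)) check in that helper would otherwise fire.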