mm: remove PageSwapCache

This flag is now only used on folios, so we can remove all the page
accessors and reword the comments that refer to them.

Link: https://lkml.kernel.org/r/20240821193445.2294269-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
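
For code that still has only a struct page, the replacement is exactly the conversion the removed accessor performed internally (visible in the page-flags.h hunk below). A minimal sketch; the helper name here is hypothetical, and in-tree callers simply open-code the conversion:

	/* Hypothetical helper: what a page-based caller does now that
	 * PageSwapCache() is gone -- look up the folio, then test the
	 * folio-level flag.
	 */
	static inline bool my_page_in_swapcache(struct page *page)
	{
		return folio_test_swapcache(page_folio(page));
	}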

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h

@@ -109,7 +109,7 @@ struct page {
 			/**
 			 * @private: Mapping-private opaque data.
 			 * Usually used for buffer_heads if PagePrivate.
-			 * Used for swp_entry_t if PageSwapCache.
+			 * Used for swp_entry_t if swapcache flag set.
 			 * Indicates order in the buddy system if PageBuddy.
 			 */
 			unsigned long private;
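
As the reworded comment says, a swapcache folio stores its swp_entry_t in this word. A minimal sketch of reading it, assuming a locked folio that tests true for folio_test_swapcache() (current mainline exposes the field as folio->swap, which aliases private):

	/* Sketch: the private word of a swapcache folio is the swap entry. */
	swp_entry_t entry = folio->swap;	/* aliases folio->private */
	pgoff_t offset = swp_offset(entry);	/* slot within the swap device */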

--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h

@@ -574,15 +574,10 @@ static __always_inline bool folio_test_swapcache(const struct folio *folio)
 			test_bit(PG_swapcache, const_folio_flags(folio, 0));
 }
 
-static __always_inline bool PageSwapCache(const struct page *page)
-{
-	return folio_test_swapcache(page_folio(page));
-}
-
-SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
-CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
+FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
+FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
 #else
-PAGEFLAG_FALSE(SwapCache, swapcache)
+FOLIO_FLAG_FALSE(swapcache)
 #endif
 
 PAGEFLAG(Unevictable, unevictable, PF_HEAD)
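
The FOLIO_SET_FLAG/FOLIO_CLEAR_FLAG macros generate the folio_set_swapcache() and folio_clear_swapcache() helpers that the callers below keep using. Roughly what the expansion looks like, as a simplified sketch rather than the exact macro output:

	/* Approximate expansion of FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE):
	 * the flag lives on the head page, hence FOLIO_HEAD_PAGE.
	 */
	static __always_inline void folio_set_swapcache(struct folio *folio)
	{
		set_bit(PG_swapcache, folio_flags(folio, FOLIO_HEAD_PAGE));
	}

	static __always_inline void folio_clear_swapcache(struct folio *folio)
	{
		clear_bit(PG_swapcache, folio_flags(folio, FOLIO_HEAD_PAGE));
	}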

--- a/mm/ksm.c
+++ b/mm/ksm.c

@@ -909,12 +909,13 @@ static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
 	 */
 	while (!folio_try_get(folio)) {
 		/*
-		 * Another check for page->mapping != expected_mapping would
-		 * work here too.  We have chosen the !PageSwapCache test to
-		 * optimize the common case, when the page is or is about to
-		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
-		 * in the ref_freeze section of __remove_mapping(); but Anon
-		 * folio->mapping reset to NULL later, in free_pages_prepare().
+		 * Another check for folio->mapping != expected_mapping
+		 * would work here too.  We have chosen to test the
+		 * swapcache flag to optimize the common case, when the
+		 * folio is or is about to be freed: the swapcache flag
+		 * is cleared (under spin_lock_irq) in the ref_freeze
+		 * section of __remove_mapping(); but anon folio->mapping
+		 * is reset to NULL later, in free_pages_prepare().
 		 */
 		if (!folio_test_swapcache(folio))
 			goto stale;
@@ -945,7 +946,7 @@ static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
 stale:
 	/*
-	 * We come here from above when page->mapping or !PageSwapCache
+	 * We come here from above when folio->mapping or the swapcache flag
 	 * suggests that the node is stale; but it might be under migration.
 	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
 	 * before checking whether node->kpfn has been changed.
@@ -1452,7 +1453,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 		goto out;
 
 	/*
-	 * We need the page lock to read a stable PageSwapCache in
+	 * We need the folio lock to read a stable swapcache flag in
 	 * write_protect_page().  We use trylock_page() instead of
 	 * lock_page() because we don't want to wait here - we
 	 * prefer to continue scanning and merging different pages,
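
The trylock strategy this comment describes, reduced to its core (a simplified sketch; the real function performs further checks before and after):

	/* Don't sleep on a busy page; keep scanning and try to merge it
	 * again on a later pass instead.
	 */
	if (!trylock_page(page))
		goto out;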
@@ -3123,7 +3124,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
 		 * newfolio->mapping was set in advance; now we need smp_wmb()
 		 * to make sure that the new stable_node->kpfn is visible
 		 * to ksm_get_folio() before it can see that folio->mapping
-		 * has gone stale (or that folio_test_swapcache has been cleared).
+		 * has gone stale (or that the swapcache flag has been cleared).
 		 */
 		smp_wmb();
 		folio_set_stable_node(folio, NULL);
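
Taken together with the smp_rmb() mentioned in ksm_get_folio() above, this forms a publish/consume barrier pair. A condensed sketch of both sides, simplified from the surrounding mainline code:

	/* Writer (folio_migrate_ksm): make the new kpfn visible before the
	 * old folio's mapping/swapcache state can appear stale.
	 */
	stable_node->kpfn = folio_pfn(newfolio);
	smp_wmb();		/* pairs with the reader's smp_rmb() */
	folio_set_stable_node(folio, NULL);

	/* Reader (ksm_get_folio, stale path): re-read kpfn only after the
	 * barrier, and retry the lookup if the node has moved.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;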

--- a/mm/migrate.c
+++ b/mm/migrate.c

@@ -639,7 +639,8 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 		folio_migrate_ksm(newfolio, folio);
 	/*
 	 * Please do not reorder this without considering how mm/ksm.c's
-	 * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
+	 * ksm_get_folio() depends upon ksm_migrate_page() and the
+	 * swapcache flag.
 	 */
 	if (folio_test_swapcache(folio))
 		folio_clear_swapcache(folio);

--- a/mm/shmem.c
+++ b/mm/shmem.c

@@ -502,8 +502,8 @@ static int shmem_replace_entry(struct address_space *mapping,
  * Sometimes, before we decide whether to proceed or to fail, we must check
  * that an entry was not already brought back from swap by a racing thread.
  *
- * Checking page is not enough: by the time a SwapCache page is locked, it
- * might be reused, and again be SwapCache, using the same swap as before.
+ * Checking folio is not enough: by the time a swapcache folio is locked, it
+ * might be reused, and again be swapcache, using the same swap as before.
  */
 static bool shmem_confirm_swap(struct address_space *mapping,
 			       pgoff_t index, swp_entry_t swap)
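
The way callers use shmem_confirm_swap() shows why the folio alone cannot be trusted: they lock the folio, then re-verify the mapping slot. A simplified sketch of that caller-side pattern, condensed from mm/shmem.c with error paths trimmed:

	/* Lock first, then confirm the slot still holds the same swap
	 * entry -- the folio may have been freed and reused with the very
	 * same entry in between.
	 */
	folio_lock(folio);
	if (!folio_test_swapcache(folio) || folio->swap.val != swap.val ||
	    !shmem_confirm_swap(mapping, index, swap)) {
		error = -EEXIST;	/* lost the race; caller retries */
		goto unlock;
	}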
@@ -1965,9 +1965,10 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	if (unlikely(error)) {
 		/*
-		 * Is this possible?  I think not, now that our callers check
-		 * both PageSwapCache and page_private after getting page lock;
-		 * but be defensive.  Reverse old to newpage for clear and free.
+		 * Is this possible?  I think not, now that our callers
+		 * check both the swapcache flag and folio->private
+		 * after getting the folio lock; but be defensive.
+		 * Reverse old to newpage for clear and free.
 		 */
 		old = new;
 	} else {