mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 04:38:03 +00:00
mm: return a folio from read_swap_cache_async()
The only two callers simply call put_page() on the page returned, so they're happier calling folio_put(). Saves two calls to compound_head(). Link: https://lkml.kernel.org/r/20231213215842.671461-13-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
69fe7d67cb
commit
6e03492e9d
22
mm/madvise.c
22
mm/madvise.c
@@ -180,7 +180,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
|
||||
for (addr = start; addr < end; addr += PAGE_SIZE) {
|
||||
pte_t pte;
|
||||
swp_entry_t entry;
|
||||
struct page *page;
|
||||
struct folio *folio;
|
||||
|
||||
if (!ptep++) {
|
||||
ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
|
||||
@@ -198,10 +198,10 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
|
||||
pte_unmap_unlock(ptep, ptl);
|
||||
ptep = NULL;
|
||||
|
||||
page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
|
||||
folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
|
||||
vma, addr, &splug);
|
||||
if (page)
|
||||
put_page(page);
|
||||
if (folio)
|
||||
folio_put(folio);
|
||||
}
|
||||
|
||||
if (ptep)
|
||||
@@ -223,17 +223,17 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
|
||||
{
|
||||
XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
|
||||
pgoff_t end_index = linear_page_index(vma, end) - 1;
|
||||
struct page *page;
|
||||
struct folio *folio;
|
||||
struct swap_iocb *splug = NULL;
|
||||
|
||||
rcu_read_lock();
|
||||
xas_for_each(&xas, page, end_index) {
|
||||
xas_for_each(&xas, folio, end_index) {
|
||||
unsigned long addr;
|
||||
swp_entry_t entry;
|
||||
|
||||
if (!xa_is_value(page))
|
||||
if (!xa_is_value(folio))
|
||||
continue;
|
||||
entry = radix_to_swp_entry(page);
|
||||
entry = radix_to_swp_entry(folio);
|
||||
/* There might be swapin error entries in shmem mapping. */
|
||||
if (non_swap_entry(entry))
|
||||
continue;
|
||||
@@ -243,10 +243,10 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
|
||||
xas_pause(&xas);
|
||||
rcu_read_unlock();
|
||||
|
||||
page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
|
||||
folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
|
||||
vma, addr, &splug);
|
||||
if (page)
|
||||
put_page(page);
|
||||
if (folio)
|
||||
folio_put(folio);
|
||||
|
||||
rcu_read_lock();
|
||||
}
|
||||
|
@@ -46,9 +46,8 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
|
||||
struct folio *filemap_get_incore_folio(struct address_space *mapping,
|
||||
pgoff_t index);
|
||||
|
||||
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
|
||||
struct vm_area_struct *vma,
|
||||
unsigned long addr,
|
||||
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
|
||||
struct vm_area_struct *vma, unsigned long addr,
|
||||
struct swap_iocb **plug);
|
||||
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
|
||||
struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
|
||||
|
@@ -533,9 +533,9 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
|
||||
* __read_swap_cache_async() call them and swap_read_folio() holds the
|
||||
* swap cache folio lock.
|
||||
*/
|
||||
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
|
||||
struct vm_area_struct *vma,
|
||||
unsigned long addr, struct swap_iocb **plug)
|
||||
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
|
||||
struct vm_area_struct *vma, unsigned long addr,
|
||||
struct swap_iocb **plug)
|
||||
{
|
||||
bool page_allocated;
|
||||
struct mempolicy *mpol;
|
||||
@@ -549,7 +549,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
|
||||
|
||||
if (page_allocated)
|
||||
swap_read_folio(folio, false, plug);
|
||||
return folio_file_page(folio, swp_offset(entry));
|
||||
return folio;
|
||||
}
|
||||
|
||||
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
|
||||
|
Loading…
Reference in New Issue
Block a user