mm: add page_rmappable_folio() wrapper
folio_prep_large_rmappable() is being used repeatedly along with a conversion from page to folio, a check non-NULL, a check order > 1: wrap it all up into struct folio *page_rmappable_folio(struct page *).

Link: https://lkml.kernel.org/r/8d92c6cf-eebe-748-e29c-c8ab224c741@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 2cafb58217
commit 23e4883248
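At each call site the wrapper replaces the same open-coded sequence: cast the page to a folio, check it is non-NULL, and prep it when its order is greater than 1. A minimal before/after sketch of that call-site change (kernel C, relying only on the existing struct folio, folio_order() and folio_prep_large_rmappable() internals shown in the hunks below):

/* Before: pattern repeated at every allocation site */
folio = (struct folio *)page;
if (folio && order > 1)
	folio_prep_large_rmappable(folio);
return folio;

/* After: one inline helper from mm/internal.h; it reads the order
 * from the folio itself via folio_order(), so callers no longer
 * need to thread the order through.
 */
return page_rmappable_folio(page);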
mm/internal.h
@@ -415,6 +415,15 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 
 void folio_undo_large_rmappable(struct folio *folio);
 
+static inline struct folio *page_rmappable_folio(struct page *page)
+{
+	struct folio *folio = (struct folio *)page;
+
+	if (folio && folio_order(folio) > 1)
+		folio_prep_large_rmappable(folio);
+	return folio;
+}
+
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
 	struct folio *folio = (struct folio *)page;
mm/mempolicy.c
@@ -2122,10 +2122,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		mpol_cond_put(pol);
 		gfp |= __GFP_COMP;
 		page = alloc_page_interleave(gfp, order, nid);
-		folio = (struct folio *)page;
-		if (folio && order > 1)
-			folio_prep_large_rmappable(folio);
-		goto out;
+		return page_rmappable_folio(page);
 	}
 
 	if (pol->mode == MPOL_PREFERRED_MANY) {
@@ -2135,10 +2132,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		gfp |= __GFP_COMP;
 		page = alloc_pages_preferred_many(gfp, order, node, pol);
 		mpol_cond_put(pol);
-		folio = (struct folio *)page;
-		if (folio && order > 1)
-			folio_prep_large_rmappable(folio);
-		goto out;
+		return page_rmappable_folio(page);
 	}
 
 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
@@ -2232,12 +2226,7 @@ EXPORT_SYMBOL(alloc_pages);
 
 struct folio *folio_alloc(gfp_t gfp, unsigned order)
 {
-	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
-	struct folio *folio = (struct folio *)page;
-
-	if (folio && order > 1)
-		folio_prep_large_rmappable(folio);
-	return folio;
+	return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order));
 }
 EXPORT_SYMBOL(folio_alloc);
 
mm/page_alloc.c
@@ -4598,12 +4598,8 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask)
 {
 	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
-			preferred_nid, nodemask);
-	struct folio *folio = (struct folio *)page;
-
-	if (folio && order > 1)
-		folio_prep_large_rmappable(folio);
-	return folio;
+					preferred_nid, nodemask);
+	return page_rmappable_folio(page);
 }
 EXPORT_SYMBOL(__folio_alloc);
 