d2f8671045
On LoongArch, one TLB entry maps two consecutive pages. For kernel space, both PTEs of such a pair (buddies) must have the PAGE_GLOBAL bit set; otherwise the hardware treats the entry as a non-global TLB entry, which causes problems, e.g. local_flush_tlb_kernel_range() fails to flush kernel TLB entries because it is supposed to flush only entries with the global bit set. The kernel address space areas include the percpu, vmalloc, vmemmap, fixmap and KASAN areas, and for these areas both consecutive page table entries must have the PAGE_GLOBAL bit enabled. Therefore set_pte() and pte_clear() used to check and set the buddy PTE entry in addition to their own entry. However, setting two PTE entries is not an atomic operation, which caused a failure in the test_vmalloc test case. So the function kernel_pte_init() is added to initialize a PTE table when it is created for kernel address space, with a default initial PTE value of PAGE_GLOBAL instead of zero. Then set_pte() and pte_clear() only need to update their own PTE entry and can leave the buddy entry alone.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
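For context, a minimal sketch of the buddy handling that this change makes unnecessary. This is simplified, not the exact code in arch/loongarch/include/asm/pgtable.h, and set_pte_old()/set_pte_new() are hypothetical names used only for the before/after comparison:

/* Two consecutive PTEs share one TLB entry; the buddy of a PTE is
 * found by flipping the lowest bit of its index within the table.
 */
#define ptep_buddy(ptep)	((pte_t *)((unsigned long)(ptep) ^ sizeof(pte_t)))

/* Before: a global kernel PTE also forced the global bit onto its
 * buddy, via two separate (non-atomic) writes.
 */
static inline void set_pte_old(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);

		if (pte_none(ptep_get(buddy)))
			WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
	}
}

/* After: kernel PTE tables are pre-filled with _PAGE_GLOBAL by
 * kernel_pte_init(), so the buddy is already global and a single
 * write to the target entry is enough.
 */
static inline void set_pte_new(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
}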
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/* A DMW (Direct Mapping Window) address maps linearly to physical memory. */
struct page *dmw_virt_to_page(unsigned long kaddr)
{
	return phys_to_page(__pa(kaddr));
}
EXPORT_SYMBOL(dmw_virt_to_page);

/* A TLB-mapped address must be resolved through its kernel PTE. */
struct page *tlb_virt_to_page(unsigned long kaddr)
{
	return phys_to_page(pfn_to_phys(pte_pfn(*virt_to_kpte(kaddr))));
}
EXPORT_SYMBOL(tlb_virt_to_page);

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *init, *ret = NULL;
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);

	if (ptdesc) {
		ret = (pgd_t *)ptdesc_address(ptdesc);
		init = pgd_offset(&init_mm, 0UL);
		pgd_init(ret);
		/* Share the kernel half of the PGD with init_mm. */
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(pgd_alloc);

void pgd_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long entry;

	/* Point every PGD slot at the next-level "invalid" table. */
#if !defined(__PAGETABLE_PUD_FOLDED)
	entry = (unsigned long)invalid_pud_table;
#elif !defined(__PAGETABLE_PMD_FOLDED)
	entry = (unsigned long)invalid_pmd_table;
#else
	entry = (unsigned long)invalid_pte_table;
#endif

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PGD;

	/* Unrolled: fill eight entries per iteration. */
	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p[4] = entry;
		p += 8;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pgd_init);

#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long pagetable = (unsigned long)invalid_pte_table;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PMD;

	do {
		p[0] = pagetable;
		p[1] = pagetable;
		p[2] = pagetable;
		p[3] = pagetable;
		p[4] = pagetable;
		p += 8;
		p[-3] = pagetable;
		p[-2] = pagetable;
		p[-1] = pagetable;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pmd_init);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long pagetable = (unsigned long)invalid_pmd_table;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PUD;

	do {
		p[0] = pagetable;
		p[1] = pagetable;
		p[2] = pagetable;
		p[3] = pagetable;
		p[4] = pagetable;
		p += 8;
		p[-3] = pagetable;
		p[-2] = pagetable;
		p[-1] = pagetable;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pud_init);
#endif

void kernel_pte_init(void *addr)
{
	unsigned long *p, *end;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PTE;

	/*
	 * Preset every entry to _PAGE_GLOBAL instead of zero, so that a
	 * kernel PTE and its buddy are born global and set_pte() and
	 * pte_clear() never have to touch the buddy entry.
	 */
	do {
		p[0] = _PAGE_GLOBAL;
		p[1] = _PAGE_GLOBAL;
		p[2] = _PAGE_GLOBAL;
		p[3] = _PAGE_GLOBAL;
		p[4] = _PAGE_GLOBAL;
		p += 8;
		p[-3] = _PAGE_GLOBAL;
		p[-2] = _PAGE_GLOBAL;
		p[-1] = _PAGE_GLOBAL;
	} while (p != end);
}

pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
	pmd_t pmd;

	pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);

	return pmd;
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
	/* A PMD-level (huge page) mapping changed; conservatively flush all. */
	flush_tlb_all();
}

void __init pagetable_init(void)
{
	/* Initialize the entire pgd. */
	pgd_init(swapper_pg_dir);
	pgd_init(invalid_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
	pud_init(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_init(invalid_pmd_table);
#endif
}