mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 04:38:03 +00:00
97e3d26b5e
Seth found that the CPU-entry-area — the piece of per-cpu data that is mapped into the userspace page-tables for kPTI — is not subject to any randomization, irrespective of kASLR settings. On x86_64 a whole P4D (512 GB) of virtual address space is reserved for this structure, which is plenty large enough to randomize things a little. As such, use a straightforward randomization scheme that avoids duplicates to spread the existing CPUs over the available space. [ bp: Fix le build. ] Reported-by: Seth Jenkins <sethjenkins@google.com> Reviewed-by: Kees Cook <keescook@chromium.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Signed-off-by: Borislav Petkov <bp@suse.de>
23 lines
623 B
C
23 lines
623 B
C
#ifndef _ASM_X86_PGTABLE_AREAS_H
#define _ASM_X86_PGTABLE_AREAS_H

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_areas.h>
#endif

/* Single page reserved for the readonly IDT mapping: */
#define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
/* Per-cpu entry area storage starts on the page after the RO IDT page. */
#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

#ifdef CONFIG_X86_32
/*
 * 32-bit: the map covers the RO IDT page plus one fixed-size entry
 * area per possible CPU, measured from CPU_ENTRY_AREA_BASE.
 */
#define CPU_ENTRY_AREA_MAP_SIZE		(CPU_ENTRY_AREA_PER_CPU +		\
					 (CPU_ENTRY_AREA_SIZE * NR_CPUS) -	\
					 CPU_ENTRY_AREA_BASE)
#else
/*
 * 64-bit: reserve a full P4D (512 GB) of virtual address space; per the
 * commit introducing this, the slack allows the per-cpu areas to be
 * spread (randomized) within the region rather than packed densely.
 */
#define CPU_ENTRY_AREA_MAP_SIZE		P4D_SIZE
#endif

#endif /* _ASM_X86_PGTABLE_AREAS_H */