Merge branch 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next

Pull x86 vdso updates from Peter Anvin:
 "Vdso cleanups and improvements largely from Andy Lutomirski.  This
  makes the vdso a lot less 'special'"

* 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso, build: Make LE access macros clearer, host-safe
  x86/vdso, build: Fix cross-compilation from big-endian architectures
  x86/vdso, build: When vdso2c fails, unlink the output
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, mm: Replace arch_vma_name with vm_ops->name for vsyscalls
  x86, mm: Improve _install_special_mapping and fix x86 vdso naming
  mm, fs: Add vm_ops->name as an alternative to arch_vma_name
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, vdso: Remove vestiges of VDSO_PRELINK and some outdated comments
  x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO
  x86, vdso: Move the 32-bit vdso special pages after the text
  x86, vdso: Reimplement vdso.so preparation in build-time C
  x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c
  x86, vdso: Clean up 32-bit vs 64-bit vdso params
  x86, mm: Ensure correct alignment of the fixmap
torvalds committed Jun 5, 2014
2 parents 2071b3e + c191920 commit a0abcf2
Showing 40 changed files with 794 additions and 608 deletions.
8 changes: 4 additions & 4 deletions arch/x86/ia32/ia32_signal.c
@@ -383,8 +383,8 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
} else {
/* Return stub is in 32bit vsyscall page */
if (current->mm->context.vdso)
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
sigreturn);
restorer = current->mm->context.vdso +
selected_vdso32->sym___kernel_sigreturn;
else
restorer = &frame->retcode;
}
@@ -462,8 +462,8 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
else
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
rt_sigreturn);
restorer = current->mm->context.vdso +
selected_vdso32->sym___kernel_rt_sigreturn;
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

/*
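Both hunks replace the old VDSO32_SYMBOL() lookup with plain pointer arithmetic against the symbol offsets that the new build-time vdso2c tool records per image. A minimal sketch of that addressing scheme, using the names from this series; the helper itself is hypothetical, not part of the commit:

/* Hypothetical helper, for illustration only: a 32-bit vDSO symbol's
 * user address is the per-process vDSO base plus the offset recorded
 * in the currently selected vdso_image. */
static void __user *vdso32_sym(unsigned long sym_offset)
{
	return current->mm->context.vdso + sym_offset;
}

/* Usage mirroring the hunks above:
 *	restorer = vdso32_sym(selected_vdso32->sym___kernel_sigreturn);
 *	restorer = vdso32_sym(selected_vdso32->sym___kernel_rt_sigreturn);
 */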
35 changes: 20 additions & 15 deletions arch/x86/include/asm/elf.h
@@ -75,7 +75,12 @@ typedef struct user_fxsr_struct elf_fpxregset_t;

#include <asm/vdso.h>

extern unsigned int vdso_enabled;
#ifdef CONFIG_X86_64
extern unsigned int vdso64_enabled;
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
extern unsigned int vdso32_enabled;
#endif

/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -269,9 +274,9 @@ extern int force_personality32;

struct task_struct;

#define ARCH_DLINFO_IA32(vdso_enabled) \
#define ARCH_DLINFO_IA32 \
do { \
if (vdso_enabled) { \
if (vdso32_enabled) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \
@@ -281,7 +286,7 @@ do { \

#define STACK_RND_MASK (0x7ff)

#define ARCH_DLINFO ARCH_DLINFO_IA32(vdso_enabled)
#define ARCH_DLINFO ARCH_DLINFO_IA32

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */

@@ -292,16 +297,17 @@ do { \

#define ARCH_DLINFO \
do { \
if (vdso_enabled) \
if (vdso64_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long)current->mm->context.vdso); \
(unsigned long __force)current->mm->context.vdso); \
} while (0)

/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
#define ARCH_DLINFO_X32 \
do { \
if (vdso_enabled) \
if (vdso64_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long)current->mm->context.vdso); \
(unsigned long __force)current->mm->context.vdso); \
} while (0)

#define AT_SYSINFO 32
@@ -310,7 +316,7 @@ do { \
if (test_thread_flag(TIF_X32)) \
ARCH_DLINFO_X32; \
else \
ARCH_DLINFO_IA32(sysctl_vsyscall32)
ARCH_DLINFO_IA32

#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

@@ -319,18 +325,17 @@ else \
#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)

#define VDSO_ENTRY \
((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
((unsigned long)current->mm->context.vdso + \
selected_vdso32->sym___kernel_vsyscall)

struct linux_binprm;

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
extern int x32_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);

extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
#define compat_arch_setup_additional_pages syscall32_setup_pages
extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages

extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
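On both 32-bit and 64-bit, ARCH_DLINFO is what advertises the vDSO to user space through the ELF auxiliary vector (AT_SYSINFO_EHDR, plus AT_SYSINFO for the 32-bit entry point). A small, self-contained user-space check, using the standard glibc getauxval() API rather than anything added by this commit:

#include <elf.h>
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>

int main(void)
{
	/* The kernel fills the auxiliary vector via ARCH_DLINFO at execve(). */
	unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);

	if (!ehdr) {
		puts("no vDSO advertised (vdso disabled?)");
		return 1;
	}

	/* AT_SYSINFO_EHDR points at the ELF header of the mapped vdso.so. */
	printf("vDSO mapped at %#lx, ELF magic %s\n", ehdr,
	       memcmp((void *)ehdr, ELFMAG, SELFMAG) == 0 ? "ok" : "BAD");
	return 0;
}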
11 changes: 4 additions & 7 deletions arch/x86/include/asm/fixmap.h
@@ -24,7 +24,7 @@
#include <linux/threads.h>
#include <asm/kmap_types.h>
#else
#include <asm/vsyscall.h>
#include <uapi/asm/vsyscall.h>
#endif

/*
@@ -41,7 +41,8 @@
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
#else
#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
#define FIXADDR_TOP (round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
PAGE_SIZE)
#endif


@@ -68,11 +69,7 @@ enum fixed_addresses {
#ifdef CONFIG_X86_32
FIX_HOLE,
#else
VSYSCALL_LAST_PAGE,
VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
+ ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
VVAR_PAGE,
VSYSCALL_HPET,
VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
#ifdef CONFIG_PARAVIRT_CLOCK
PVCLOCK_FIXMAP_BEGIN,
PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
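On 64-bit, FIXADDR_TOP is now derived from the single remaining legacy vsyscall page instead of the old multi-page vsyscall range. A quick user-space check of the arithmetic, assuming the usual x86-64 constants (PAGE_SIZE 4096, PMD_SHIFT 21) and a round_up() with the kernel's semantics:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(1UL << 21)			/* 2 MiB, i.e. PMD_SHIFT == 21 */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)	/* power-of-two round up */

int main(void)
{
	unsigned long vsyscall_addr = -10UL << 20;	/* VSYSCALL_ADDR */
	unsigned long fixaddr_top   = round_up(vsyscall_addr + PAGE_SIZE, PMD_SIZE) - PAGE_SIZE;
	unsigned long vsyscall_slot = (fixaddr_top - vsyscall_addr) >> 12;

	printf("VSYSCALL_ADDR = %#lx\n", vsyscall_addr);	/* 0xffffffffff600000 */
	printf("FIXADDR_TOP   = %#lx\n", fixaddr_top);		/* 0xffffffffff7ff000 */
	printf("VSYSCALL_PAGE = %lu\n", vsyscall_slot);		/* fixmap index 511 */
	return 0;
}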
2 changes: 1 addition & 1 deletion arch/x86/include/asm/mmu.h
@@ -18,7 +18,7 @@ typedef struct {
#endif

struct mutex lock;
void *vdso;
void __user *vdso;
} mm_context_t;

#ifdef CONFIG_SMP
2 changes: 0 additions & 2 deletions arch/x86/include/asm/proto.h
@@ -12,8 +12,6 @@ void ia32_syscall(void);
void ia32_cstar_target(void);
void ia32_sysenter_target(void);

void syscall32_cpu_init(void);

void x86_configure_nx(void);
void x86_report_nx(void);

78 changes: 33 additions & 45 deletions arch/x86/include/asm/vdso.h
@@ -3,63 +3,51 @@

#include <asm/page_types.h>
#include <linux/linkage.h>
#include <linux/init.h>

#ifdef __ASSEMBLER__
#ifndef __ASSEMBLER__

#define DEFINE_VDSO_IMAGE(symname, filename) \
__PAGE_ALIGNED_DATA ; \
.globl symname##_start, symname##_end ; \
.align PAGE_SIZE ; \
symname##_start: ; \
.incbin filename ; \
symname##_end: ; \
.align PAGE_SIZE /* extra data here leaks to userspace. */ ; \
\
.previous ; \
\
.globl symname##_pages ; \
.bss ; \
.align 8 ; \
.type symname##_pages, @object ; \
symname##_pages: ; \
.zero (symname##_end - symname##_start + PAGE_SIZE - 1) / PAGE_SIZE * (BITS_PER_LONG / 8) ; \
.size symname##_pages, .-symname##_pages
#include <linux/mm_types.h>

#else
struct vdso_image {
void *data;
unsigned long size; /* Always a multiple of PAGE_SIZE */

#define DECLARE_VDSO_IMAGE(symname) \
extern char symname##_start[], symname##_end[]; \
extern struct page *symname##_pages[]
/* text_mapping.pages is big enough for data/size page pointers */
struct vm_special_mapping text_mapping;

#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
unsigned long alt, alt_len;

#include <asm/vdso32.h>
unsigned long sym_end_mapping; /* Total size of the mapping */

DECLARE_VDSO_IMAGE(vdso32_int80);
#ifdef CONFIG_COMPAT
DECLARE_VDSO_IMAGE(vdso32_syscall);
unsigned long sym_vvar_page;
unsigned long sym_hpet_page;
unsigned long sym_VDSO32_NOTE_MASK;
unsigned long sym___kernel_sigreturn;
unsigned long sym___kernel_rt_sigreturn;
unsigned long sym___kernel_vsyscall;
unsigned long sym_VDSO32_SYSENTER_RETURN;
};

#ifdef CONFIG_X86_64
extern const struct vdso_image vdso_image_64;
#endif

#ifdef CONFIG_X86_X32
extern const struct vdso_image vdso_image_x32;
#endif
DECLARE_VDSO_IMAGE(vdso32_sysenter);

/*
* Given a pointer to the vDSO image, find the pointer to VDSO32_name
* as that symbol is defined in the vDSO sources or linker script.
*/
#define VDSO32_SYMBOL(base, name) \
({ \
extern const char VDSO32_##name[]; \
(void __user *)(VDSO32_##name + (unsigned long)(base)); \
})
#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
extern const struct vdso_image vdso_image_32_int80;
#ifdef CONFIG_COMPAT
extern const struct vdso_image vdso_image_32_syscall;
#endif
extern const struct vdso_image vdso_image_32_sysenter;

/*
* These symbols are defined with the addresses in the vsyscall page.
* See vsyscall-sigreturn.S.
*/
extern void __user __kernel_sigreturn;
extern void __user __kernel_rt_sigreturn;
extern const struct vdso_image *selected_vdso32;
#endif

void __init patch_vdso32(void *vdso, size_t len);
extern void __init init_vdso_image(const struct vdso_image *image);

#endif /* __ASSEMBLER__ */

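The assembly DEFINE_VDSO_IMAGE/DECLARE_VDSO_IMAGE plumbing gives way to a plain C description that the new build-time vdso2c tool emits for each vDSO variant. Roughly the shape of such an image, with an invented name and invented offsets purely to show how the fields relate (the real initializers are generated into vdso-image-*.c files):

/* Illustrative only: the blob, the name and every offset below are made up. */
static char example_vdso_blob[8192] __attribute__((aligned(4096)));
static struct page *example_vdso_pages[2];

static struct vdso_image example_image = {
	.data = example_vdso_blob,		/* page-aligned copy of vdso.so */
	.size = sizeof(example_vdso_blob),	/* always a multiple of PAGE_SIZE */
	.text_mapping = {
		.name  = "[vdso]",
		.pages = example_vdso_pages,	/* one entry per text page */
	},
	.sym_vvar_page   = 8192,		/* vvar page placed right after the text */
	.sym_hpet_page   = 12288,		/* HPET page, when an HPET is present */
	.sym_end_mapping = 16384,		/* total size of the special mapping */
};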
11 changes: 0 additions & 11 deletions arch/x86/include/asm/vdso32.h

This file was deleted.

20 changes: 1 addition & 19 deletions arch/x86/include/asm/vvar.h
@@ -29,31 +29,13 @@

#else

#ifdef BUILD_VDSO32
extern char __vvar_page;

#define DECLARE_VVAR(offset, type, name) \
extern type vvar_ ## name __attribute__((visibility("hidden")));

#define VVAR(name) (vvar_ ## name)

#else

extern char __vvar_page;

/* Base address of vvars. This is not ABI. */
#ifdef CONFIG_X86_64
#define VVAR_ADDRESS (-10*1024*1024 - 4096)
#else
#define VVAR_ADDRESS (&__vvar_page)
#endif

#define DECLARE_VVAR(offset, type, name) \
static type const * const vvaraddr_ ## name = \
(void *)(VVAR_ADDRESS + (offset));

#define VVAR(name) (*vvaraddr_ ## name)
#endif

#define DEFINE_VVAR(type, name) \
type name \
__attribute__((section(".vvar_" #name), aligned(16))) __visible
7 changes: 1 addition & 6 deletions arch/x86/include/uapi/asm/vsyscall.h
@@ -7,11 +7,6 @@ enum vsyscall_num {
__NR_vgetcpu,
};

#define VSYSCALL_START (-10UL << 20)
#define VSYSCALL_SIZE 1024
#define VSYSCALL_END (-2UL << 20)
#define VSYSCALL_MAPPED_PAGES 1
#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))

#define VSYSCALL_ADDR (-10UL << 20)

#endif /* _UAPI_ASM_X86_VSYSCALL_H */
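What survives in the UAPI header is a single constant: the legacy vsyscall page lives at the fixed address -10UL << 20 = 0xffffffffff600000, one page in size. A user-space spot check, hedged in that the mapping only shows up when the kernel has vsyscall emulation enabled (it is absent with vsyscall=none):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	/* Expect something like: ffffffffff600000-ffffffffff601000 ... [vsyscall] */
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vsyscall]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}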
33 changes: 33 additions & 0 deletions arch/x86/kernel/cpu/common.c
@@ -20,6 +20,7 @@
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
@@ -953,6 +954,38 @@ static void vgetcpu_set_mode(void)
else
vgetcpu_mode = VGETCPU_LSL;
}

/* May not be __init: called during resume */
static void syscall32_cpu_init(void)
{
/* Load these always in case some future AMD CPU supports
SYSENTER from compat mode too. */
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
#endif

#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
int cpu = get_cpu();
struct tss_struct *tss = &per_cpu(init_tss, cpu);

if (!boot_cpu_has(X86_FEATURE_SEP)) {
put_cpu();
return;
}

tss->x86_tss.ss1 = __KERNEL_CS;
tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
put_cpu();
}
#endif

void __init identify_boot_cpu(void)
3 changes: 0 additions & 3 deletions arch/x86/kernel/hpet.c
@@ -74,9 +74,6 @@ static inline void hpet_writel(unsigned int d, unsigned int a)
static inline void hpet_set_mapping(void)
{
hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
#endif
}

static inline void hpet_clear_mapping(void)
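The VSYSCALL_HPET fixmap can go away because user space no longer reads the HPET through a fixed kernel address; the vDSO now maps the HPET page itself (the sym_hpet_page offset in the new struct vdso_image) next to its own text. Conceptually the vDSO clock code then only needs the mapped page and the HPET main-counter offset; a hedged sketch, not the kernel's exact code:

#include <stdint.h>

#define HPET_COUNTER	0x0f0	/* offset of the HPET main counter register */

/* Sketch: read the free-running main counter from an already-mapped
 * HPET register page, the way the vDSO does with its hpet mapping. */
static inline uint32_t hpet_read_counter(const volatile char *hpet_regs)
{
	return *(const volatile uint32_t *)(hpet_regs + HPET_COUNTER);
}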
6 changes: 4 additions & 2 deletions arch/x86/kernel/signal.c
@@ -298,7 +298,8 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
}

if (current->mm->context.vdso)
restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
restorer = current->mm->context.vdso +
selected_vdso32->sym___kernel_sigreturn;
else
restorer = &frame->retcode;
if (ksig->ka.sa.sa_flags & SA_RESTORER)
@@ -361,7 +362,8 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
save_altstack_ex(&frame->uc.uc_stack, regs->sp);

/* Set up to return from userspace. */
restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
restorer = current->mm->context.vdso +
selected_vdso32->sym___kernel_sigreturn;
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
put_user_ex(restorer, &frame->pretcode);