/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed
 * to extract and format the required data.
 */
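
/*
 * Build-flow note (a sketch of the 2.6-era Kbuild convention): this file
 * is never linked into the kernel. It is compiled to assembly (-S) and a
 * sed script extracts the "->" marker lines emitted by the macros below,
 * turning each one into a #define in the generated asm-offsets.h header.
 */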

#include <linux/crypto.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/suspend.h>
#include <asm/ucontext.h>
#include "sigframe.h"
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/elf.h>

#include <xen/interface/xen.h>

#ifdef CONFIG_LGUEST_GUEST
#include <linux/lguest.h>
#include "../../../drivers/lguest/lg.h"
#endif

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() asm volatile("\n->" : : )

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem));
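/*
 * A sketch of the mechanism (the values shown are illustrative): for
 *	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
 * the compiler's assembly output contains a marker line along the lines of
 *	->PAGE_SIZE_asm $4096 PAGE_SIZE
 * which the post-processing step rewrites into
 *	#define PAGE_SIZE_asm 4096
 * Assembly code can then use such constants directly, e.g.
 *	movl TI_flags(%ebp), %ecx
 * instead of hard-coding structure layouts. BLANK() just emits a bare
 * "->" marker, which becomes a blank separator line in the header.
 */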

/* workaround for a warning with -Wmissing-prototypes */
void foo(void);

void foo(void)
{
	OFFSET(SIGCONTEXT_eax, sigcontext, eax);
	OFFSET(SIGCONTEXT_ebx, sigcontext, ebx);
	OFFSET(SIGCONTEXT_ecx, sigcontext, ecx);
	OFFSET(SIGCONTEXT_edx, sigcontext, edx);
	OFFSET(SIGCONTEXT_esi, sigcontext, esi);
	OFFSET(SIGCONTEXT_edi, sigcontext, edi);
	OFFSET(SIGCONTEXT_ebp, sigcontext, ebp);
	OFFSET(SIGCONTEXT_esp, sigcontext, esp);
	OFFSET(SIGCONTEXT_eip, sigcontext, eip);
	BLANK();

	OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
	OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
	OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
	OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
	OFFSET(CPUINFO_hard_math, cpuinfo_x86, hard_math);
	OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
	OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
	OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
	BLANK();

	OFFSET(TI_task, thread_info, task);
	OFFSET(TI_exec_domain, thread_info, exec_domain);
	OFFSET(TI_flags, thread_info, flags);
	OFFSET(TI_status, thread_info, status);
	OFFSET(TI_preempt_count, thread_info, preempt_count);
	OFFSET(TI_addr_limit, thread_info, addr_limit);
	OFFSET(TI_restart_block, thread_info, restart_block);
	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
	OFFSET(TI_cpu, thread_info, cpu);
	BLANK();

	OFFSET(GDS_size, Xgt_desc_struct, size);
	OFFSET(GDS_address, Xgt_desc_struct, address);
	OFFSET(GDS_pad, Xgt_desc_struct, pad);
	BLANK();

	OFFSET(PT_EBX, pt_regs, ebx);
	OFFSET(PT_ECX, pt_regs, ecx);
	OFFSET(PT_EDX, pt_regs, edx);
	OFFSET(PT_ESI, pt_regs, esi);
	OFFSET(PT_EDI, pt_regs, edi);
	OFFSET(PT_EBP, pt_regs, ebp);
	OFFSET(PT_EAX, pt_regs, eax);
	OFFSET(PT_DS, pt_regs, xds);
	OFFSET(PT_ES, pt_regs, xes);
	OFFSET(PT_FS, pt_regs, xfs);
	OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
	OFFSET(PT_EIP, pt_regs, eip);
	OFFSET(PT_CS, pt_regs, xcs);
	OFFSET(PT_EFLAGS, pt_regs, eflags);
	OFFSET(PT_OLDESP, pt_regs, esp);
	OFFSET(PT_OLDSS, pt_regs, xss);
	BLANK();

	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
	OFFSET(RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
	BLANK();

	OFFSET(pbe_address, pbe, address);
	OFFSET(pbe_orig_address, pbe, orig_address);
	OFFSET(pbe_next, pbe, next);

	/* Offset from the sysenter stack to tss.esp0 */
	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
		 sizeof(struct tss_struct));
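	/*
	 * Usage note (a sketch based on the 2.6.23-era sysenter path):
	 * MSR_IA32_SYSENTER_ESP is programmed to point just past the end
	 * of the per-cpu tss_struct, so on entry the stub can apply this
	 * negative offset to %esp to fetch the real kernel stack pointer
	 * from x86_tss.esp0 before anything is pushed.
	 */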

	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
	DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
	DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
	DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
	DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);

	DEFINE(VDSO_PRELINK_asm, VDSO_PRELINK);

	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);

#ifdef CONFIG_PARAVIRT
	BLANK();
	OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
	OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable);
	OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable);
	OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit);
	OFFSET(PARAVIRT_iret, paravirt_ops, iret);
	OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
#endif

#ifdef CONFIG_XEN
	BLANK();
	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
	OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
#endif

#ifdef CONFIG_LGUEST_GUEST
	BLANK();
	OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
	OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
	OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc);
	OFFSET(LGUEST_PAGES_host_cr3, lguest_pages, state.host_cr3);
	OFFSET(LGUEST_PAGES_host_sp, lguest_pages, state.host_sp);
	OFFSET(LGUEST_PAGES_guest_gdt_desc, lguest_pages, state.guest_gdt_desc);
	OFFSET(LGUEST_PAGES_guest_idt_desc, lguest_pages, state.guest_idt_desc);
	OFFSET(LGUEST_PAGES_guest_gdt, lguest_pages, state.guest_gdt);
	OFFSET(LGUEST_PAGES_regs_trapnum, lguest_pages, regs.trapnum);
	OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
	OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
#endif
}