diff --git a/kernel.spec b/kernel.spec
index 8eaa19641..699254427 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1173,7 +1173,7 @@ ApplyPatch linux-2.6.29-sparc-IOC_TYPECHECK.patch
 #
 # Exec shield
 #
-#ApplyPatch linux-2.6-i386-nx-emulation.patch ###FIX
+ApplyPatch linux-2.6-i386-nx-emulation.patch
 ApplyPatch linux-2.6-32bit-mmap-exec-randomization.patch
 #
diff --git a/linux-2.6-i386-nx-emulation.patch b/linux-2.6-i386-nx-emulation.patch
index 3e55b88ca..a6ac19f6d 100644
--- a/linux-2.6-i386-nx-emulation.patch
+++ b/linux-2.6-i386-nx-emulation.patch
@@ -3,21 +3,21 @@
 @@ -5,6 +5,7 @@
  #include
  #include
- #include
+ +#include
+ #include
- static inline void fill_ldt(struct desc_struct *desc,
-                             const struct user_desc *info)
-@@ -93,6 +94,9 @@ static inline int desc_empty(const void *ptr)
+ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
+@@ -97,6 +98,9 @@ static inline int desc_empty(const void *ptr)
- #define load_TLS(t, cpu) native_load_tls(t, cpu)
- #define set_ldt native_set_ldt
+ #define load_TLS(t, cpu) native_load_tls(t, cpu)
+ #define set_ldt native_set_ldt
 +#ifdef CONFIG_X86_32
-+#define load_user_cs_desc native_load_user_cs_desc
++#define load_user_cs_desc native_load_user_cs_desc
 +#endif /*CONFIG_X86_32*/
- #define write_ldt_entry(dt, entry, desc) \
-	native_write_ldt_entry(dt, entry, desc)
+ #define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc)
+ #define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type)
 @@ -392,4 +396,25 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
 }
@@ -46,7 +46,7 @@
 #endif /* _ASM_X86_DESC_H */
 --- a/arch/x86/include/asm/mmu.h
 +++ b/arch/x86/include/asm/mmu.h
-@@ -7,18 +7,25 @@
+@@ -7,6 +7,9 @@
 /*
  * The x86 doesn't have a mmu context, but
  * we put the segment information here.
 */
 typedef struct {
 	void *ldt;
- 	int size;
+@@ -19,6 +22,11 @@ typedef struct {
+ 	struct mutex lock;
 	void *vdso;
-
- #ifdef CONFIG_X86_64
- 	/* True if mm supports a task running in 32 bit compatibility mode. */
- 	unsigned short ia32_compat;
- #endif
+ +#ifdef CONFIG_X86_32
+ 	struct desc_struct user_cs;
+ 	unsigned long exec_limit;
+#endif
- } mm_context_t;
 #ifdef CONFIG_SMP
@@ -508,16 +504,15 @@
 static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, unsigned long start, unsigned long end);
-@@ -388,6 +401,9 @@
+@@ -432,6 +432,8 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	struct vm_area_struct *prev, struct rb_node **rb_link,
+ 	struct rb_node *rb_parent)
 {
- 	struct vm_area_struct *next;
-
+ 	if (vma->vm_flags & VM_EXEC)
+ 		arch_add_exec_range(mm, vma->vm_end);
-+
- 	vma->vm_prev = prev;
- 	if (prev) {
- 		next = prev->vm_next;
+ 	__vma_link_list(mm, vma, prev, rb_parent);
+ 	__vma_link_rb(mm, vma, rb_link, rb_parent);
+ }
 @@ -489,6 +504,8 @@
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 	if (mm->mmap_cache == vma)
@@ -555,8 +550,8 @@
 	/* Success. */
 @@ -2254,6 +2367,7 @@
- 	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
- 	tlb_finish_mmu(tlb, 0, end);
+ 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+ 	tlb_finish_mmu(&tlb, 0, end);
+ 	arch_flush_exec_range(mm);
 /*