Bring back the 32bit mmap randomization patch for now.
NX emulation is still too dependent upon it.
This commit is contained in:
parent
f29a760a48
commit
c62acbf43a
@@ -645,6 +645,7 @@ Patch09: linux-2.6-upstream-reverts.patch
|
||||
|
||||
# Standalone patches
|
||||
|
||||
Patch160: linux-2.6-32bit-mmap-exec-randomization.patch
|
||||
Patch161: linux-2.6-i386-nx-emulation.patch
|
||||
|
||||
Patch202: linux-2.6-debug-taint-vm.patch
|
||||
@@ -1198,6 +1199,7 @@ ApplyOptionalPatch linux-2.6-upstream-reverts.patch -R
|
||||
|
||||
# Architecture patches
|
||||
# x86(-64)
|
||||
ApplyPatch linux-2.6-32bit-mmap-exec-randomization.patch
|
||||
ApplyPatch linux-2.6-i386-nx-emulation.patch
|
||||
|
||||
#
|
||||
@@ -2035,7 +2037,8 @@ fi
|
||||
%changelog
|
||||
* Sat Aug 27 2011 Dave Jones <davej@redhat.com>
|
||||
- Fix get_gate_vma usage in i386 NX emulation
|
||||
Fix up dependency on the dropped randomization patch.
|
||||
- Bring back the 32bit mmap randomization patch for now.
|
||||
NX emulation is still too dependent upon it.
|
||||
|
||||
* Sat Aug 27 2011 Josh Boyer <jwboyer@redhat.com>
|
||||
- Linux 3.1-rc3-git6
|
||||
|
226
linux-2.6-32bit-mmap-exec-randomization.patch
Normal file
226
linux-2.6-32bit-mmap-exec-randomization.patch
Normal file
@@ -0,0 +1,226 @@
|
||||
Before:
|
||||
Heap randomisation test (PIE) : 16 bits (guessed)
|
||||
Main executable randomisation (PIE) : 8 bits (guessed)
|
||||
|
||||
After:
|
||||
Heap randomisation test (PIE) : 19 bits (guessed)
|
||||
Main executable randomisation (PIE) : 12 bits (guessed)
|
||||
|
||||
|
||||
|
||||
--- b/include/linux/sched.h
|
||||
+++ b/include/linux/sched.h
|
||||
@@ -397,6 +397,10 @@
|
||||
extern unsigned long
|
||||
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
|
||||
unsigned long, unsigned long);
|
||||
+
|
||||
+extern unsigned long
|
||||
+arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
|
||||
+ unsigned long, unsigned long);
|
||||
extern unsigned long
|
||||
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
|
||||
unsigned long len, unsigned long pgoff,
|
||||
--- b/mm/mmap.c
|
||||
+++ b/mm/mmap.c
|
||||
@@ -28,6 +28,7 @@
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/audit.h>
|
||||
#include <linux/khugepaged.h>
|
||||
+#include <linux/random.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/cacheflush.h>
|
||||
@@ -1000,7 +1001,8 @@
|
||||
/* Obtain the address to map to. we verify (or select) it and ensure
|
||||
* that it represents a valid section of the address space.
|
||||
*/
|
||||
- addr = get_unmapped_area(file, addr, len, pgoff, flags);
|
||||
+ addr = get_unmapped_area_prot(file, addr, len, pgoff, flags,
|
||||
+ prot & PROT_EXEC);
|
||||
if (addr & ~PAGE_MASK)
|
||||
return addr;
|
||||
|
||||
@@ -1552,8 +1554,8 @@
|
||||
}
|
||||
|
||||
unsigned long
|
||||
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
|
||||
- unsigned long pgoff, unsigned long flags)
|
||||
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
|
||||
+ unsigned long pgoff, unsigned long flags, int exec)
|
||||
{
|
||||
unsigned long (*get_area)(struct file *, unsigned long,
|
||||
unsigned long, unsigned long, unsigned long);
|
||||
@@ -1566,7 +1568,11 @@
|
||||
if (len > TASK_SIZE)
|
||||
return -ENOMEM;
|
||||
|
||||
- get_area = current->mm->get_unmapped_area;
|
||||
+ if (exec && current->mm->get_unmapped_exec_area)
|
||||
+ get_area = current->mm->get_unmapped_exec_area;
|
||||
+ else
|
||||
+ get_area = current->mm->get_unmapped_area;
|
||||
+
|
||||
if (file && file->f_op && file->f_op->get_unmapped_area)
|
||||
get_area = file->f_op->get_unmapped_area;
|
||||
addr = get_area(file, addr, len, pgoff, flags);
|
||||
@@ -1580,8 +1586,83 @@
|
||||
|
||||
return arch_rebalance_pgtables(addr, len);
|
||||
}
|
||||
+EXPORT_SYMBOL(get_unmapped_area_prot);
|
||||
+
|
||||
+static bool should_randomize(void)
|
||||
+{
|
||||
+ return (current->flags & PF_RANDOMIZE) &&
|
||||
+ !(current->personality & ADDR_NO_RANDOMIZE);
|
||||
+}
|
||||
+
|
||||
+#define SHLIB_BASE 0x00110000
|
||||
+
|
||||
+unsigned long
|
||||
+arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
|
||||
+ unsigned long len0, unsigned long pgoff, unsigned long flags)
|
||||
+{
|
||||
+ unsigned long addr = addr0, len = len0;
|
||||
+ struct mm_struct *mm = current->mm;
|
||||
+ struct vm_area_struct *vma;
|
||||
+ unsigned long tmp;
|
||||
+
|
||||
+ if (len > TASK_SIZE)
|
||||
+ return -ENOMEM;
|
||||
+
|
||||
+ if (flags & MAP_FIXED)
|
||||
+ return addr;
|
||||
+
|
||||
+ if (!addr)
|
||||
+ addr = !should_randomize() ? SHLIB_BASE :
|
||||
+ randomize_range(SHLIB_BASE, 0x01000000, len);
|
||||
+
|
||||
+ if (addr) {
|
||||
+ addr = PAGE_ALIGN(addr);
|
||||
+ vma = find_vma(mm, addr);
|
||||
+ if (TASK_SIZE - len >= addr &&
|
||||
+ (!vma || addr + len <= vma->vm_start))
|
||||
+ return addr;
|
||||
+ }
|
||||
+
|
||||
+ addr = SHLIB_BASE;
|
||||
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
|
||||
+ /* At this point: (!vma || addr < vma->vm_end). */
|
||||
+ if (TASK_SIZE - len < addr)
|
||||
+ return -ENOMEM;
|
||||
+
|
||||
+ if (!vma || addr + len <= vma->vm_start) {
|
||||
+ /*
|
||||
+ * Must not let a PROT_EXEC mapping get into the
|
||||
+ * brk area:
|
||||
+ */
|
||||
+ if (addr + len > mm->brk)
|
||||
+ goto failed;
|
||||
+
|
||||
+ /*
|
||||
+ * Up until the brk area we randomize addresses
|
||||
+ * as much as possible:
|
||||
+ */
|
||||
+ if (addr >= 0x01000000 && should_randomize()) {
|
||||
+ tmp = randomize_range(0x01000000,
|
||||
+ PAGE_ALIGN(max(mm->start_brk,
|
||||
+ (unsigned long)0x08000000)), len);
|
||||
+ vma = find_vma(mm, tmp);
|
||||
+ if (TASK_SIZE - len >= tmp &&
|
||||
+ (!vma || tmp + len <= vma->vm_start))
|
||||
+ return tmp;
|
||||
+ }
|
||||
+ /*
|
||||
+ * Ok, randomization didn't work out - return
|
||||
+ * the result of the linear search:
|
||||
+ */
|
||||
+ return addr;
|
||||
+ }
|
||||
+ addr = vma->vm_end;
|
||||
+ }
|
||||
+
|
||||
+failed:
|
||||
+ return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
|
||||
+}
|
||||
|
||||
-EXPORT_SYMBOL(get_unmapped_area);
|
||||
|
||||
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
|
||||
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
|
||||
--- a/arch/x86/mm/mmap.c
|
||||
+++ b/arch/x86/mm/mmap.c
|
||||
@@ -124,13 +124,16 @@ static unsigned long mmap_legacy_base(void)
|
||||
*/
|
||||
void arch_pick_mmap_layout(struct mm_struct *mm)
|
||||
{
|
||||
if (mmap_is_legacy()) {
|
||||
mm->mmap_base = mmap_legacy_base();
|
||||
mm->get_unmapped_area = arch_get_unmapped_area;
|
||||
mm->unmap_area = arch_unmap_area;
|
||||
} else {
|
||||
mm->mmap_base = mmap_base();
|
||||
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
||||
+ if (!(current->personality & READ_IMPLIES_EXEC)
|
||||
+ && mmap_is_ia32())
|
||||
+ mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
|
||||
mm->unmap_area = arch_unmap_area_topdown;
|
||||
}
|
||||
}
|
||||
--- a/arch/x86/vdso/vdso32-setup.c
|
||||
+++ b/arch/x86/vdso/vdso32-setup.c
|
||||
@@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
||||
if (compat)
|
||||
addr = VDSO_HIGH_BASE;
|
||||
else {
|
||||
- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
|
||||
+ addr = get_unmapped_area_prot(NULL, 0, PAGE_SIZE, 0, 0, 1);
|
||||
if (IS_ERR_VALUE(addr)) {
|
||||
ret = addr;
|
||||
goto up_fail;
|
||||
--- a/include/linux/mm.h
|
||||
+++ b/include/linux/mm.h
|
||||
@@ -1263,7 +1263,13 @@ extern int install_special_mapping(struct mm_struct *mm,
|
||||
unsigned long addr, unsigned long len,
|
||||
unsigned long flags, struct page **pages);
|
||||
|
||||
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
|
||||
+extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
|
||||
+
|
||||
+static inline unsigned long get_unmapped_area(struct file *file, unsigned long addr,
|
||||
+ unsigned long len, unsigned long pgoff, unsigned long flags)
|
||||
+{
|
||||
+ return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
|
||||
+}
|
||||
|
||||
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
|
||||
unsigned long len, unsigned long prot,
|
||||
--- a/include/linux/mm_types.h
|
||||
+++ b/include/linux/mm_types.h
|
||||
@@ -227,6 +227,9 @@ struct mm_struct {
|
||||
unsigned long (*get_unmapped_area) (struct file *filp,
|
||||
unsigned long addr, unsigned long len,
|
||||
unsigned long pgoff, unsigned long flags);
|
||||
+ unsigned long (*get_unmapped_exec_area) (struct file *filp,
|
||||
+ unsigned long addr, unsigned long len,
|
||||
+ unsigned long pgoff, unsigned long flags);
|
||||
void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
|
||||
#endif
|
||||
unsigned long mmap_base; /* base of mmap area */
|
||||
--- a/mm/mremap.c
|
||||
+++ b/mm/mremap.c
|
||||
@@ -487,10 +487,10 @@ unsigned long do_mremap(unsigned long addr,
|
||||
if (vma->vm_flags & VM_MAYSHARE)
|
||||
map_flags |= MAP_SHARED;
|
||||
|
||||
- new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
|
||||
+ new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
|
||||
vma->vm_pgoff +
|
||||
((addr - vma->vm_start) >> PAGE_SHIFT),
|
||||
- map_flags);
|
||||
+ map_flags, vma->vm_flags & VM_EXEC);
|
||||
if (new_addr & ~PAGE_MASK) {
|
||||
ret = new_addr;
|
||||
goto out;
|
@@ -606,7 +606,7 @@ index 57d1868..29c0c35 100644
|
||||
+ /* in the case of NX emulation, shove the brk segment way out of the
|
||||
+ way of the exec randomization area, since it can collide with
|
||||
+ future allocations if not. */
|
||||
-+ if ( (mm->get_unmapped_exec == arch_get_unmapped_exec_area) &&
|
||||
++ if ( (mm->get_unmapped_exec_area == arch_get_unmapped_exec_area) &&
|
||||
+ (mm->brk < 0x08000000) ) {
|
||||
+ bump = (TASK_SIZE/6);
|
||||
+ }
|
||||
|
Loading…
Reference in New Issue
Block a user