217 lines
		
	
	
		
			6.7 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
			
		
		
	
	
			217 lines
		
	
	
		
			6.7 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
| --- a/include/linux/sched.h
 | |
| +++ b/include/linux/sched.h
 | |
| @@ -397,6 +397,10 @@
 | |
|  extern unsigned long
 | |
|  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 | |
|  		       unsigned long, unsigned long);
 | |
| +
 | |
| +extern unsigned long
 | |
| +arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
 | |
| +		       unsigned long, unsigned long);
 | |
|  extern unsigned long
 | |
|  arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 | |
|  			  unsigned long len, unsigned long pgoff,
 | |
| --- a/mm/mmap.c
 | |
| +++ b/mm/mmap.c
 | |
| @@ -28,6 +28,7 @@
 | |
|  #include <linux/mmu_notifier.h>
 | |
|  #include <linux/perf_event.h>
 | |
|  #include <linux/audit.h>
 | |
| +#include <linux/random.h>
 | |
|  
 | |
|  #include <asm/uaccess.h>
 | |
|  #include <asm/cacheflush.h>
 | |
| @@ -1000,7 +1001,8 @@
 | |
|  	/* Obtain the address to map to. we verify (or select) it and ensure
 | |
|  	 * that it represents a valid section of the address space.
 | |
|  	 */
 | |
| -	addr = get_unmapped_area(file, addr, len, pgoff, flags);
 | |
| +	addr = get_unmapped_area_prot(file, addr, len, pgoff, flags,
 | |
| +		prot & PROT_EXEC);
 | |
|  	if (addr & ~PAGE_MASK)
 | |
|  		return addr;
 | |
|  
 | |
| @@ -1552,8 +1554,8 @@
 | |
|  }
 | |
|  
 | |
|  unsigned long
 | |
| -get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 | |
| -		unsigned long pgoff, unsigned long flags)
 | |
| +get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
 | |
| +		unsigned long pgoff, unsigned long flags, int exec)
 | |
|  {
 | |
|  	unsigned long (*get_area)(struct file *, unsigned long,
 | |
|  				  unsigned long, unsigned long, unsigned long);
 | |
| @@ -1566,7 +1568,11 @@
 | |
|  	if (len > TASK_SIZE)
 | |
|  		return -ENOMEM;
 | |
|  
 | |
| -	get_area = current->mm->get_unmapped_area;
 | |
| +	if (exec && current->mm->get_unmapped_exec_area)
 | |
| +		get_area = current->mm->get_unmapped_exec_area;
 | |
| +	else
 | |
| +		get_area = current->mm->get_unmapped_area;
 | |
| +
 | |
|  	if (file && file->f_op && file->f_op->get_unmapped_area)
 | |
|  		get_area = file->f_op->get_unmapped_area;
 | |
|  	addr = get_area(file, addr, len, pgoff, flags);
 | |
| @@ -1580,8 +1586,83 @@
 | |
|  
 | |
|  	return arch_rebalance_pgtables(addr, len);
 | |
|  }
 | |
| +EXPORT_SYMBOL(get_unmapped_area_prot);
 | |
| +
 | |
| +static bool should_randomize(void)
 | |
| +{
 | |
| +	return (current->flags & PF_RANDOMIZE) &&
 | |
| +		!(current->personality & ADDR_NO_RANDOMIZE);
 | |
| +}
 | |
| +
 | |
| +#define SHLIB_BASE	0x00110000
 | |
| +
 | |
| +unsigned long
 | |
| +arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
 | |
| +		unsigned long len0, unsigned long pgoff, unsigned long flags)
 | |
| +{
 | |
| +	unsigned long addr = addr0, len = len0;
 | |
| +	struct mm_struct *mm = current->mm;
 | |
| +	struct vm_area_struct *vma;
 | |
| +	unsigned long tmp;
 | |
| +
 | |
| +	if (len > TASK_SIZE)
 | |
| +		return -ENOMEM;
 | |
| +
 | |
| +	if (flags & MAP_FIXED)
 | |
| +		return addr;
 | |
| +
 | |
| +	if (!addr)
 | |
| +		addr = !should_randomize() ? SHLIB_BASE :
 | |
| +			randomize_range(SHLIB_BASE, 0x01000000, len);
 | |
| +
 | |
| +	if (addr) {
 | |
| +		addr = PAGE_ALIGN(addr);
 | |
| +		vma = find_vma(mm, addr);
 | |
| +		if (TASK_SIZE - len >= addr &&
 | |
| +		    (!vma || addr + len <= vma->vm_start))
 | |
| +			return addr;
 | |
| +	}
 | |
| +
 | |
| +	addr = SHLIB_BASE;
 | |
| +	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
 | |
| +		/* At this point:  (!vma || addr < vma->vm_end). */
 | |
| +		if (TASK_SIZE - len < addr)
 | |
| +			return -ENOMEM;
 | |
| +
 | |
| +		if (!vma || addr + len <= vma->vm_start) {
 | |
| +			/*
 | |
| +			 * Must not let a PROT_EXEC mapping get into the
 | |
| +			 * brk area:
 | |
| +			 */
 | |
| +			if (addr + len > mm->brk)
 | |
| +				goto failed;
 | |
| +
 | |
| +			/*
 | |
| +			 * Up until the brk area we randomize addresses
 | |
| +			 * as much as possible:
 | |
| +			 */
 | |
| +			if (addr >= 0x01000000 && should_randomize()) {
 | |
| +				tmp = randomize_range(0x01000000,
 | |
| +					PAGE_ALIGN(max(mm->start_brk,
 | |
| +					(unsigned long)0x08000000)), len);
 | |
| +				vma = find_vma(mm, tmp);
 | |
| +				if (TASK_SIZE - len >= tmp &&
 | |
| +				    (!vma || tmp + len <= vma->vm_start))
 | |
| +					return tmp;
 | |
| +			}
 | |
| +			/*
 | |
| +			 * OK, randomization didn't work out - return
 | |
| +			 * the result of the linear search:
 | |
| +			 */
 | |
| +			return addr;
 | |
| +		}
 | |
| +		addr = vma->vm_end;
 | |
| +	}
 | |
| +
 | |
| +failed:
 | |
| +	return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
 | |
| +}
 | |
|  
 | |
| -EXPORT_SYMBOL(get_unmapped_area);
 | |
|  
 | |
|  /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 | |
|  struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 | |
| --- a/arch/x86/mm/mmap.c
 | |
| +++ b/arch/x86/mm/mmap.c
 | |
| @@ -124,13 +124,16 @@ static unsigned long mmap_legacy_base(void)
 | |
|   */
 | |
|  void arch_pick_mmap_layout(struct mm_struct *mm)
 | |
|  {
 | |
|  	if (mmap_is_legacy()) {
 | |
|  		mm->mmap_base = mmap_legacy_base();
 | |
|  		mm->get_unmapped_area = arch_get_unmapped_area;
 | |
|  		mm->unmap_area = arch_unmap_area;
 | |
|  	} else {
 | |
|  		mm->mmap_base = mmap_base();
 | |
|  		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 | |
| +		if (!(current->personality & READ_IMPLIES_EXEC)
 | |
| +		    && mmap_is_ia32())
 | |
| +			mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
 | |
|  		mm->unmap_area = arch_unmap_area_topdown;
 | |
|  	}
 | |
|  }
 | |
| --- a/arch/x86/vdso/vdso32-setup.c
 | |
| +++ b/arch/x86/vdso/vdso32-setup.c
 | |
| @@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 | |
|  	if (compat)
 | |
|  		addr = VDSO_HIGH_BASE;
 | |
|  	else {
 | |
| -		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
 | |
| +		addr = get_unmapped_area_prot(NULL, 0, PAGE_SIZE, 0, 0, 1);
 | |
|  		if (IS_ERR_VALUE(addr)) {
 | |
|  			ret = addr;
 | |
|  			goto up_fail;
 | |
| --- a/include/linux/mm.h
 | |
| +++ b/include/linux/mm.h
 | |
| @@ -1263,7 +1263,13 @@ extern int install_special_mapping(struct mm_struct *mm,
 | |
|  				   unsigned long addr, unsigned long len,
 | |
|  				   unsigned long flags, struct page **pages);
 | |
|  
 | |
| -extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 | |
| +extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
 | |
| +
 | |
| +static inline unsigned long get_unmapped_area(struct file *file, unsigned long addr,
 | |
| +		unsigned long len, unsigned long pgoff, unsigned long flags)
 | |
| +{
 | |
| +	return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
 | |
| +}
 | |
|  
 | |
|  extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 | |
|  	unsigned long len, unsigned long prot,
 | |
| --- a/include/linux/mm_types.h
 | |
| +++ b/include/linux/mm_types.h
 | |
| @@ -227,6 +227,9 @@ struct mm_struct {
 | |
|  	unsigned long (*get_unmapped_area) (struct file *filp,
 | |
|  				unsigned long addr, unsigned long len,
 | |
|  				unsigned long pgoff, unsigned long flags);
 | |
| +	unsigned long (*get_unmapped_exec_area) (struct file *filp,
 | |
| +				unsigned long addr, unsigned long len,
 | |
| +				unsigned long pgoff, unsigned long flags);
 | |
|  	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 | |
|  #endif
 | |
|  	unsigned long mmap_base;		/* base of mmap area */
 | |
| --- a/mm/mremap.c
 | |
| +++ b/mm/mremap.c
 | |
| @@ -487,10 +487,10 @@ unsigned long do_mremap(unsigned long addr,
 | |
|  		if (vma->vm_flags & VM_MAYSHARE)
 | |
|  			map_flags |= MAP_SHARED;
 | |
|  
 | |
| -		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
 | |
| +		new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
 | |
|  					vma->vm_pgoff +
 | |
|  					((addr - vma->vm_start) >> PAGE_SHIFT),
 | |
| -					map_flags);
 | |
| +					map_flags, vma->vm_flags & VM_EXEC);
 | |
|  		if (new_addr & ~PAGE_MASK) {
 | |
|  			ret = new_addr;
 | |
|  			goto out;
 |