--- ./arch/um/Kconfig.~1~	2003-12-28 12:04:19.000000000 +0100
+++ ./arch/um/Kconfig	2003-12-28 12:05:12.000000000 +0100
@@ -196,6 +196,9 @@
 config HIGHMEM
 	bool "Highmem support"
 
+config PROC_MM
+	bool "/proc/mm support"
+
 config KERNEL_STACK_ORDER
 	int "Kernel stack size order"
 	default 2
--- ./include/linux/mm.h.~1~	2003-12-28 12:01:29.000000000 +0100
+++ ./include/linux/mm.h	2003-12-28 12:05:18.000000000 +0100
@@ -487,6 +487,9 @@
 	return __set_page_dirty_buffers(page);
 }
 
+extern long do_mprotect(struct mm_struct *mm, unsigned long start,
+			size_t len, unsigned long prot);
+
 /*
  * On a two-level page table, this ends up being trivial. Thus the
  * inlining and the symmetry break with pte_alloc_map() that does all
@@ -517,9 +520,15 @@
 
 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+extern unsigned long __do_mmap_pgoff(struct mm_struct *mm, struct file *file,
+				     unsigned long addr, unsigned long len,
+				     unsigned long prot, unsigned long flag,
+				     unsigned long pgoff);
+static inline unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long pgoff);
+	unsigned long flag, unsigned long pgoff) {
+	return __do_mmap_pgoff(current->mm, file, addr, len, prot, flag, pgoff);
+}
 
 static inline unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
--- ./include/linux/proc_mm.h.~1~	2003-12-28 12:01:29.000000000 +0100
+++ ./include/linux/proc_mm.h	2003-12-28 12:05:12.000000000 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __PROC_MM_H
+#define __PROC_MM_H
+
+#include "linux/sched.h"
+
+#define MM_MMAP 54
+#define MM_MUNMAP 55
+#define MM_MPROTECT 56
+#define MM_COPY_SEGMENTS 57
+
+struct mm_mmap {
+	unsigned long addr;
+	unsigned long len;
+	unsigned long prot;
+	unsigned long flags;
+	unsigned long fd;
+	unsigned long offset;
+};
+
+struct mm_munmap {
+	unsigned long addr;
+	unsigned long len;
+};
+
+struct mm_mprotect {
+	unsigned long addr;
+	unsigned long len;
+	unsigned int prot;
+};
+
+struct proc_mm_op {
+	int op;
+	union {
+		struct mm_mmap mmap;
+		struct mm_munmap munmap;
+		struct mm_mprotect mprotect;
+		int copy_segments;
+	} u;
+};
+
+extern struct mm_struct *proc_mm_get_mm(int fd);
+
+#endif
--- ./mm/mmap.c.~1~	2003-12-28 12:01:29.000000000 +0100
+++ ./mm/mmap.c	2003-12-28 12:05:12.000000000 +0100
@@ -462,11 +462,11 @@
  * The caller must hold down_write(current->mm->mmap_sem).
  */
 
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
-			unsigned long len, unsigned long prot,
-			unsigned long flags, unsigned long pgoff)
+unsigned long __do_mmap_pgoff(struct mm_struct *mm, struct file * file,
+			unsigned long addr, unsigned long len,
+			unsigned long prot, unsigned long flags,
+			unsigned long pgoff)
 {
-	struct mm_struct * mm = current->mm;
 	struct vm_area_struct * vma, * prev;
 	struct inode *inode;
 	unsigned int vm_flags;
@@ -714,7 +714,7 @@
 	return error;
 }
 
-EXPORT_SYMBOL(do_mmap_pgoff);
+EXPORT_SYMBOL(__do_mmap_pgoff);
 
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
--- ./mm/mprotect.c.~1~	2003-12-28 12:01:29.000000000 +0100
+++ ./mm/mprotect.c	2003-12-28 12:05:12.000000000 +0100
@@ -222,7 +222,8 @@
 }
 
 asmlinkage long
-sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+do_mprotect(struct mm_struct *mm, unsigned long start, size_t len,
+	    unsigned long prot)
 {
 	unsigned long vm_flags, nstart, end, tmp;
 	struct vm_area_struct * vma, * next, * prev;
@@ -245,9 +246,9 @@
 
 	vm_flags = calc_vm_prot_bits(prot);
 
-	down_write(&current->mm->mmap_sem);
+	down_write(&mm->mmap_sem);
 
-	vma = find_vma_prev(current->mm, start, &prev);
+	vma = find_vma_prev(mm, start, &prev);
 	error = -ENOMEM;
 	if (!vma)
 		goto out;
@@ -326,6 +327,11 @@
 		prev->vm_mm->map_count--;
 	}
 out:
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 	return error;
 }
+
+asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+{
+	return(do_mprotect(current->mm, start, len, prot));
+}
--- ./mm/proc_mm.c.~1~	2003-12-28 12:01:29.000000000 +0100
+++ ./mm/proc_mm.c	2003-12-28 12:05:12.000000000 +0100
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/mm.h"
+#include "linux/init.h"
+#include "linux/proc_fs.h"
+#include "linux/proc_mm.h"
+#include "linux/file.h"
+#include "asm/uaccess.h"
+#include "asm/mmu_context.h"
+
+static struct file_operations proc_mm_fops;
+
+struct mm_struct *proc_mm_get_mm(int fd)
+{
+	struct mm_struct *ret = ERR_PTR(-EBADF);
+	struct file *file;
+
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	ret = ERR_PTR(-EINVAL);
+	if(file->f_op != &proc_mm_fops)
+		goto out_fput;
+
+	ret = file->private_data;
+ out_fput:
+	fput(file);
+ out:
+	return(ret);
+}
+
+extern long do_mmap2(struct mm_struct *mm, unsigned long addr,
+		     unsigned long len, unsigned long prot,
+		     unsigned long flags, unsigned long fd,
+		     unsigned long pgoff);
+
+static ssize_t write_proc_mm(struct file *file, const char *buffer,
+			     size_t count, loff_t *ppos)
+{
+	struct mm_struct *mm = file->private_data;
+	struct proc_mm_op req;
+	int n, ret;
+
+	if(count > sizeof(req))
+		return(-EINVAL);
+
+	n = copy_from_user(&req, buffer, count);
+	if(n != 0)
+		return(-EFAULT);
+
+	ret = count;
+	switch(req.op){
+	case MM_MMAP: {
+		struct mm_mmap *map = &req.u.mmap;
+
+		ret = do_mmap2(mm, map->addr, map->len, map->prot,
+			       map->flags, map->fd, map->offset >> PAGE_SHIFT);
+		if((ret & ~PAGE_MASK) == 0)
+			ret = count;
+
+		break;
+	}
+	case MM_MUNMAP: {
+		struct mm_munmap *unmap = &req.u.munmap;
+
+		down_write(&mm->mmap_sem);
+		ret = do_munmap(mm, unmap->addr, unmap->len);
+		up_write(&mm->mmap_sem);
+
+		if(ret == 0)
+			ret = count;
+		break;
+	}
+	case MM_MPROTECT: {
+		struct mm_mprotect *protect = &req.u.mprotect;
+
+		ret = do_mprotect(mm, protect->addr, protect->len,
+				  protect->prot);
+		if(ret == 0)
+			ret = count;
+		break;
+	}
+
+	case MM_COPY_SEGMENTS: {
+		struct mm_struct *from = proc_mm_get_mm(req.u.copy_segments);
+
+		if(IS_ERR(from)){
+			ret = PTR_ERR(from);
+			break;
+		}
+
+		mm_copy_segments(from, mm);
+		break;
+	}
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return(ret);
+}
+
+static int open_proc_mm(struct inode *inode, struct file *file)
+{
+	struct mm_struct *mm = mm_alloc();
+	int ret;
+
+	ret = -ENOMEM;
+	if(mm == NULL)
+		goto out_mem;
+
+	ret = init_new_context(current, mm);
+	if(ret)
+		goto out_free;
+
+	spin_lock(&mmlist_lock);
+	list_add(&mm->mmlist, &current->mm->mmlist);
+	mmlist_nr++;
+	spin_unlock(&mmlist_lock);
+
+	file->private_data = mm;
+
+	return(0);
+
+ out_free:
+	mmput(mm);
+ out_mem:
+	return(ret);
+}
+
+static int release_proc_mm(struct inode *inode, struct file *file)
+{
+	struct mm_struct *mm = file->private_data;
+
+	mmput(mm);
+	return(0);
+}
+
+static struct file_operations proc_mm_fops = {
+	.open		= open_proc_mm,
+	.release	= release_proc_mm,
+	.write		= write_proc_mm,
+};
+
+static int make_proc_mm(void)
+{
+	struct proc_dir_entry *ent;
+
+	ent = create_proc_entry("mm", 0222, &proc_root);
+	if(ent == NULL){
+		printk("make_proc_mm : Failed to register /proc/mm\n");
+		return(0);
+	}
+	ent->proc_fops = &proc_mm_fops;
+
+	return(0);
+}
+
+__initcall(make_proc_mm);
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
--- ./mm/Makefile.~1~	2003-12-28 12:01:29.000000000 +0100
+++ ./mm/Makefile	2003-12-28 12:05:12.000000000 +0100
@@ -12,3 +12,5 @@
 	   slab.o swap.o truncate.o vmscan.o $(mmu-y)
 
 obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_PROC_MM)	+= proc_mm.o
+
--- ./arch/um/kernel/syscall_kern.c~	2003-12-28 12:51:48.000000000 +0100
+++ ./arch/um/kernel/syscall_kern.c	2003-12-28 15:22:58.000000000 +0100
@@ -65,21 +65,6 @@
 	return(ret);
 }
 
-/* If SKAS patch is applied and PROC_MM is on, then we get this def from
- * linux/mm.h; but this will maybe change, as it's unclean. But anyway skas3
- * is going away, so who cares? Without CONFIG_PROC_MM, the first param is just
- * ignored, i.e. it is always current->mm. This is safe as without that patch
- * nobody passes us a different value.*/
-#ifndef CONFIG_PROC_MM
-static inline unsigned long __do_mmap_pgoff(
-	struct mm_struct *mm, struct file *file,
-	unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flag,
-	unsigned long pgoff) {
-	return do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
-}
-#endif
-
 /* common code for old and new mmaps */
 long do_mmap2(struct mm_struct *mm, unsigned long addr, unsigned long len,
 	      unsigned long prot, unsigned long flags, unsigned long fd,
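For reference, here is a minimal userspace sketch (not part of the patch) of how a client might drive the /proc/mm interface added above. The MM_* values and structures are copied from include/linux/proc_mm.h in this patch, with only the members used here reproduced; the fixed address 0x40000000 is an arbitrary example, and in real SKAS use the descriptor would be handed to the UML tracer rather than used standalone.

/*
 * Illustrative sketch: create a new address space via /proc/mm and
 * request one anonymous mapping in it.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define MM_MMAP   54
#define MM_MUNMAP 55

struct mm_mmap {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

struct mm_munmap {
	unsigned long addr;
	unsigned long len;
};

struct proc_mm_op {
	int op;
	union {
		struct mm_mmap mmap;
		struct mm_munmap munmap;
	} u;
};

int main(void)
{
	struct proc_mm_op op;
	ssize_t n;
	int fd;

	/* Each open() of /proc/mm creates a fresh, empty mm_struct. */
	fd = open("/proc/mm", O_WRONLY);
	if (fd < 0) {
		perror("open /proc/mm");
		return 1;
	}

	/* Map one anonymous page into the new address space. */
	memset(&op, 0, sizeof(op));
	op.op = MM_MMAP;
	op.u.mmap.addr = 0x40000000;
	op.u.mmap.len = 4096;
	op.u.mmap.prot = PROT_READ | PROT_WRITE;
	op.u.mmap.flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
	op.u.mmap.fd = -1;
	op.u.mmap.offset = 0;

	/* write_proc_mm() returns the request size on success. */
	n = write(fd, &op, sizeof(op));
	if (n != (ssize_t) sizeof(op))
		perror("MM_MMAP request");

	/* Closing the descriptor drops the reference to the mm_struct;
	 * a tracer would instead keep it and pass it on. */
	close(fd);
	return 0;
}

The point of the design, as write_proc_mm() above shows, is that every request is applied to the mm_struct created by the open(), not to the writer's own address space, which is what lets the UML tracer build and edit its guests' address spaces from outside.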