#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>
+#include <linux/sched.h>
+
+/*
+ * Any behaviour which results in changes to the vma->vm_flags needs to
+ * take mmap_sem for writing. Others, which simply traverse vmas, need
+ * to only take it for reading.
+ */
+static int madvise_need_mmap_write(int behavior)
+{
+ switch (behavior) {
+ /* these behaviors never modify vma->vm_flags; a read lock suffices */
+ case MADV_REMOVE:
+ case MADV_WILLNEED:
+ case MADV_DONTNEED:
+ return 0;
+ default:
+ /* be safe, default to 1. list exceptions explicitly */
+ return 1;
+ }
+}
/*
* We can potentially split a vm area into separate
if (!file)
return -EBADF;
- if (file->f_mapping->a_ops->get_xip_page) {
+ if (file->f_mapping->a_ops->get_xip_mem) {
/* no bad return value, but ignore advice */
return 0;
}
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
/* vmtruncate_range needs to take i_mutex and i_alloc_sem */
- up_write(&current->mm->mmap_sem);
+ up_read(&current->mm->mmap_sem);
error = vmtruncate_range(mapping->host, offset, endoff);
- down_write(&current->mm->mmap_sem);
+ down_read(&current->mm->mmap_sem);
return error;
}
struct vm_area_struct * vma, *prev;
int unmapped_error = 0;
int error = -EINVAL;
+ int write;
size_t len;
- down_write(&current->mm->mmap_sem);
+ write = madvise_need_mmap_write(behavior);
+ if (write)
+ down_write(&current->mm->mmap_sem);
+ else
+ down_read(&current->mm->mmap_sem);
if (start & ~PAGE_MASK)
goto out;
vma = find_vma(current->mm, start);
}
out:
- up_write(&current->mm->mmap_sem);
+ if (write)
+ up_write(&current->mm->mmap_sem);
+ else
+ up_read(&current->mm->mmap_sem);
+
return error;
}