mm: protect VMA modifications using VMA sequence count

The VMA sequence count has been introduced to allow fast detection of
VMA modification when running a page fault handler without holding
the mmap_sem.
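
For context, the vm_write_begin()/vm_write_end() helpers used below come
from an earlier patch in this series; a minimal sketch of what they
expand to, assuming the vm_sequence seqcount_t that the series adds to
struct vm_area_struct:

	/*
	 * Sketch only, shown for context: writers are already serialized
	 * by the mmap_sem held for write; the seqcount is what lets
	 * readers detect a concurrent modification.
	 */
	static inline void vm_write_begin(struct vm_area_struct *vma)
	{
		write_seqcount_begin(&vma->vm_sequence);
	}

	static inline void vm_write_end(struct vm_area_struct *vma)
	{
		write_seqcount_end(&vma->vm_sequence);
	}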

This patch provides protection against the VMA modification done in:
	- madvise()
	- mpol_rebind_policy()
	- vma_replace_policy()
	- change_prot_numa()
	- mlock(), munlock()
	- mprotect()
	- mmap_region()
	- collapse_huge_page()
	- userfaultfd registering services

In addition, VMA fields which will be read during the speculative fault
path need to be written using WRITE_ONCE() to prevent the writes from
being torn and intermediate values from becoming visible to other CPUs.
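
To illustrate the pairing, here is a hypothetical reader-side sketch
(not part of this patch) of how a speculative fault path would consume
these fields: snapshot the sequence count, read the fields with
READ_ONCE(), and retry when the count shows a concurrent modification:

	unsigned int seq;
	unsigned long flags;

	seq = read_seqcount_begin(&vma->vm_sequence);
	/*
	 * READ_ONCE() pairs with the WRITE_ONCE() introduced by this
	 * patch, so no torn or intermediate value can be observed.
	 */
	flags = READ_ONCE(vma->vm_flags);
	/* ... speculative processing using the snapshotted fields ... */
	if (read_seqcount_retry(&vma->vm_sequence, seq))
		goto retry;	/* the VMA changed under us, fall back */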

Change-Id: Ic36046b7254e538b6baf7144c50ae577ee7f2074
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:15
[vinmenon@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
---
 fs/proc/task_mmu.c |  5
 fs/userfaultfd.c   | 17
 mm/khugepaged.c    |  3
 mm/madvise.c       |  6
 mm/mempolicy.c     | 51
 mm/mlock.c         | 13
 mm/mmap.c          | 22
 mm/mprotect.c      |  4
 mm/swap_state.c    |  8
 9 files changed

--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1221,8 +1221,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				goto out_mm;
 			}
 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
-				vma->vm_flags &= ~VM_SOFTDIRTY;
+				vm_write_begin(vma);
+				WRITE_ONCE(vma->vm_flags,
+					   vma->vm_flags & ~VM_SOFTDIRTY);
 				vma_set_page_prot(vma);
+				vm_write_end(vma);
 			}
 			downgrade_write(&mm->mmap_sem);
 			break;

--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -656,8 +656,11 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
 	octx = vma->vm_userfaultfd_ctx.ctx;
 	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
+		vm_write_begin(vma);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
+		WRITE_ONCE(vma->vm_flags,
+			   vma->vm_flags & ~(VM_UFFD_WP | VM_UFFD_MISSING));
+		vm_write_end(vma);
 		return 0;
 	}
@@ -883,8 +886,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 			vma = prev;
 		else
 			prev = vma;
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vm_write_end(vma);
 	}
 	up_write(&mm->mmap_sem);
 	mmput(mm);
@@ -1443,8 +1448,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx.ctx = ctx;
+		vm_write_end(vma);

	skip:
 		prev = vma;
@@ -1602,8 +1609,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vm_write_end(vma);

	skip:
 		prev = vma;

--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1011,6 +1011,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	if (mm_find_pmd(mm, address) != pmd)
 		goto out;

+	vm_write_begin(vma);
 	anon_vma_lock_write(vma->anon_vma);

 	pte = pte_offset_map(pmd, address);
@@ -1046,6 +1047,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
 		spin_unlock(pmd_ptl);
 		anon_vma_unlock_write(vma->anon_vma);
+		vm_write_end(vma);
 		result = SCAN_FAIL;
 		goto out;
 	}
@@ -1080,6 +1082,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
 	spin_unlock(pmd_ptl);
+	vm_write_end(vma);

 	*hpage = NULL;

--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -184,7 +184,9 @@ success:
 	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
 	 */
-	vma->vm_flags = new_flags;
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, new_flags);
+	vm_write_end(vma);
 out:
 	return error;
 }
@@ -450,9 +452,11 @@ static void madvise_free_page_range(struct mmu_gather *tlb,
 		.private = tlb,
 	};

+	vm_write_begin(vma);
 	tlb_start_vma(tlb, vma);
 	walk_page_range(addr, end, &free_walk);
 	tlb_end_vma(tlb, vma);
+	vm_write_end(vma);
 }

 static int madvise_free_single_vma(struct vm_area_struct *vma,

--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -379,8 +379,11 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 	struct vm_area_struct *vma;

 	down_write(&mm->mmap_sem);
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		vm_write_begin(vma);
 		mpol_rebind_policy(vma->vm_policy, new);
+		vm_write_end(vma);
+	}
 	up_write(&mm->mmap_sem);
 }
@@ -578,9 +581,11 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 {
 	int nr_updated;

+	vm_write_begin(vma);
 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
 	if (nr_updated)
 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
+	vm_write_end(vma);

 	return nr_updated;
 }
@@ -681,6 +686,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
 	if (IS_ERR(new))
 		return PTR_ERR(new);

+	vm_write_begin(vma);
 	if (vma->vm_ops && vma->vm_ops->set_policy) {
 		err = vma->vm_ops->set_policy(vma, new);
 		if (err)
@@ -688,11 +694,17 @@ static int vma_replace_policy(struct vm_area_struct *vma,
 	}

 	old = vma->vm_policy;
-	vma->vm_policy = new; /* protected by mmap_sem */
+	/*
+	 * The speculative page fault handler accesses this field without
+	 * holding the mmap_sem.
+	 */
+	WRITE_ONCE(vma->vm_policy, new);
+	vm_write_end(vma);
 	mpol_put(old);

 	return 0;

 err_out:
+	vm_write_end(vma);
 	mpol_put(new);
 	return err;
 }
@@ -1584,23 +1596,28 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
 						unsigned long addr)
 {
-	struct mempolicy *pol = NULL;
+	struct mempolicy *pol;

-	if (vma) {
-		if (vma->vm_ops && vma->vm_ops->get_policy) {
-			pol = vma->vm_ops->get_policy(vma, addr);
-		} else if (vma->vm_policy) {
-			pol = vma->vm_policy;
+	if (!vma)
+		return NULL;

-			/*
-			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
-			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
-			 * count on these policies which will be dropped by
-			 * mpol_cond_put() later
-			 */
-			if (mpol_needs_cond_ref(pol))
-				mpol_get(pol);
-		}
-	}
+	if (vma->vm_ops && vma->vm_ops->get_policy)
+		return vma->vm_ops->get_policy(vma, addr);
+
+	/*
+	 * This could be called without holding the mmap_sem in the
+	 * speculative page fault handler's path.
+	 */
+	pol = READ_ONCE(vma->vm_policy);
+	if (pol) {
+		/*
+		 * shmem_alloc_page() passes MPOL_F_SHARED policy with
+		 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
+		 * count on these policies which will be dropped by
+		 * mpol_cond_put() later
+		 */
+		if (mpol_needs_cond_ref(pol))
+			mpol_get(pol);
+	}

 	return pol;

--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -439,7 +439,9 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 void munlock_vma_pages_range(struct vm_area_struct *vma,
 			     unsigned long start, unsigned long end)
 {
-	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK);
+	vm_write_end(vma);

 	while (start < end) {
 		struct page *page;
@@ -562,10 +564,11 @@ success:
 	 * It's okay if try_to_unmap_one unmaps a page just after we
 	 * set VM_LOCKED, populate_vma_page_range will bring it back.
 	 */
-	if (lock)
-		vma->vm_flags = newflags;
-	else
+	if (lock) {
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, newflags);
+		vm_write_end(vma);
+	} else
 		munlock_vma_pages_range(vma, start, end);

 out:

--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -843,17 +843,18 @@ again:
 	}
 	if (start != vma->vm_start) {
-		vma->vm_start = start;
+		WRITE_ONCE(vma->vm_start, start);
 		start_changed = true;
 	}
 	if (end != vma->vm_end) {
-		vma->vm_end = end;
+		WRITE_ONCE(vma->vm_end, end);
 		end_changed = true;
 	}
-	vma->vm_pgoff = pgoff;
+	WRITE_ONCE(vma->vm_pgoff, pgoff);
 	if (adjust_next) {
-		next->vm_start += adjust_next << PAGE_SHIFT;
-		next->vm_pgoff += adjust_next;
+		WRITE_ONCE(next->vm_start,
+			   next->vm_start + (adjust_next << PAGE_SHIFT));
+		WRITE_ONCE(next->vm_pgoff, next->vm_pgoff + adjust_next);
 	}

 	if (root) {
@@ -1760,13 +1761,15 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 out:
 	perf_event_mmap(vma);

+	vm_write_begin(vma);
 	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
 					vma == get_gate_vma(current->mm)))
 			mm->locked_vm += (len >> PAGE_SHIFT);
 		else
-			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+			WRITE_ONCE(vma->vm_flags,
+				   vma->vm_flags & VM_LOCKED_CLEAR_MASK);
 	}

 	if (file)
@@ -1779,9 +1782,10 @@ out:
 	 * then new mapped in-place (which must be aimed as
 	 * a completely new data area).
 	 */
-	vma->vm_flags |= VM_SOFTDIRTY;
+	WRITE_ONCE(vma->vm_flags, vma->vm_flags | VM_SOFTDIRTY);

 	vma_set_page_prot(vma);
+	vm_write_end(vma);

 	return addr;
@@ -2410,8 +2414,8 @@ int expand_downwards(struct vm_area_struct *vma,
 				mm->locked_vm += grow;
 			vm_stat_account(mm, vma->vm_flags, grow);
 			anon_vma_interval_tree_pre_update_vma(vma);
-			vma->vm_start = address;
-			vma->vm_pgoff -= grow;
+			WRITE_ONCE(vma->vm_start, address);
+			WRITE_ONCE(vma->vm_pgoff, vma->vm_pgoff - grow);
 			anon_vma_interval_tree_post_update_vma(vma);
 			vma_gap_update(vma);
 			spin_unlock(&mm->page_table_lock);

--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -361,12 +361,14 @@ success:
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
-	vma->vm_flags = newflags;
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, newflags);
 	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
 	vma_set_page_prot(vma);

 	change_protection(vma, start, end, vma->vm_page_prot,
 			  dirty_accountable, 0);
+	vm_write_end(vma);

 	/*
 	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major

--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -560,6 +560,10 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * the readahead.
  *
  * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
+ * This is needed to ensure the VMA will not be freed behind our back. In the
+ * case of the speculative page fault handler, this cannot happen, even if we
+ * don't hold the mmap_sem. Callees are assumed to take care of reading the
+ * VMA's fields using READ_ONCE() to read consistent values.
  */
 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 				    struct vm_fault *vmf)
@@ -652,9 +656,9 @@ static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
 				     unsigned long *start,
 				     unsigned long *end)
 {
-	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
+	*start = max3(lpfn, PFN_DOWN(READ_ONCE(vma->vm_start)),
 		      PFN_DOWN(faddr & PMD_MASK));
-	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
+	*end = min3(rpfn, PFN_DOWN(READ_ONCE(vma->vm_end)),
 		      PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
 }
