| author | James Morris <james.l.morris@oracle.com> | 2017-12-11 17:01:08 +1100 |
|---|---|---|
| committer | James Morris <james.l.morris@oracle.com> | 2017-12-11 17:01:08 +1100 |
| commit | d21bd6898336a7892914d308d5e0868f0b863571 (patch) | |
| tree | f5f756c25348b5a6c1ce9ddbaa7d1ecd1bef40b0 | /arch/s390/mm/pgalloc.c |
| parent | 34d8751fd4ffa34e85ee7e85d34168b3f3f62b42 (diff) | |
| parent | 50c4c4e268a2d7a3e58ebb698ac74da0de40ae36 (diff) | |
Sync to v4.15-rc3 for security subsystem developers to work against.
Diffstat (limited to 'arch/s390/mm/pgalloc.c')
-rw-r--r-- | arch/s390/mm/pgalloc.c | 21
1 file changed, 9 insertions, 12 deletions
```diff
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 05f1f27e6708..cb364153c43c 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Page table allocation functions
  *
@@ -70,10 +71,8 @@ static void __crst_table_upgrade(void *arg)
 {
 	struct mm_struct *mm = arg;
 
-	if (current->active_mm == mm) {
-		clear_user_asce();
+	if (current->active_mm == mm)
 		set_user_asce(mm);
-	}
 	__tlb_flush_local();
 }
 
@@ -84,8 +83,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 
 	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
 	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
-	if (end >= TASK_SIZE_MAX)
-		return -ENOMEM;
 	rc = 0;
 	notify = 0;
 	while (mm->context.asce_limit < end) {
@@ -158,13 +155,13 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
 struct page *page_table_alloc_pgste(struct mm_struct *mm)
 {
 	struct page *page;
-	unsigned long *table;
+	u64 *table;
 
 	page = alloc_page(GFP_KERNEL);
 	if (page) {
-		table = (unsigned long *) page_to_phys(page);
-		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
-		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+		table = (u64 *)page_to_phys(page);
+		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
+		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	}
 	return page;
 }
@@ -221,12 +218,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
 		atomic_set(&page->_mapcount, 3);
-		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
-		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
+		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
 		atomic_set(&page->_mapcount, 1);
-		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
+		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
 		spin_unlock_bh(&mm->context.lock);
```
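The clear_table() to memset64() conversion above keeps the amount of memory initialized unchanged: each page-table entry is a 64-bit word, so a count of PTRS_PER_PTE words covers the same region as PAGE_SIZE/2 bytes. The standalone sketch below only illustrates that arithmetic outside the kernel; the fill64() helper, the hard-coded PAGE_SIZE, PTRS_PER_PTE and _PAGE_INVALID values, and the stack table are illustrative assumptions, not the kernel's implementation.

```c
/*
 * Standalone user-space sketch (not kernel code) of the size arithmetic
 * behind the clear_table() -> memset64() conversion in the diff above.
 * PAGE_SIZE, PTRS_PER_PTE and _PAGE_INVALID are hard-coded with the usual
 * s390 values purely for illustration.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PTRS_PER_PTE	256UL		/* 64-bit entries per 2K page table */
#define _PAGE_INVALID	0x400UL		/* illustrative invalid-PTE pattern */

/* Fill 'count' 64-bit words with 'v', mirroring what memset64() does. */
static void fill64(uint64_t *s, uint64_t v, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		s[i] = v;
}

int main(void)
{
	static uint64_t table[2 * PTRS_PER_PTE];	/* one 4K page of entries */

	/* First half: invalid PTEs; second half: zeroed PGSTE area. */
	fill64(table, _PAGE_INVALID, PTRS_PER_PTE);
	fill64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);

	/* PTRS_PER_PTE 8-byte words == PAGE_SIZE/2 bytes, the range the old
	 * clear_table(table, _PAGE_INVALID, PAGE_SIZE/2) call wrote. */
	assert(PTRS_PER_PTE * sizeof(uint64_t) == PAGE_SIZE / 2);
	printf("initialized 2 x %lu bytes\n",
	       (unsigned long)(PTRS_PER_PTE * sizeof(table[0])));
	return 0;
}
```

By the same arithmetic, the non-PGSTE case uses 2 * PTRS_PER_PTE words, which matches the old clear_table(table, _PAGE_INVALID, PAGE_SIZE) over the full 4K page.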