author     Jason Gunthorpe <jgg@mellanox.com>  2019-07-02 15:07:52 -0300
committer  Jason Gunthorpe <jgg@mellanox.com>  2019-07-02 15:10:45 -0300
commit     cc5dfd59e375f4d0f2b64643723d16b38b2f2d78 (patch)
tree       0a8f526169ee889d6af4e7679122c946773ec33a /mm/memory.c
parent     9ec3f4cb35bc8278f0582fed9f9229c9315c2ffb (diff)
parent     b6b346a0665a8bf8b28fd851217c435a3eec4af9 (diff)
Merge branch 'hmm-devmem-cleanup.4' into rdma.git hmm
Christoph Hellwig says:

====================
Below is a series that cleans up the dev_pagemap interface so that it is
more easily usable, which removes the need to wrap it in hmm and thus
allows us to kill a lot of code.

Changes since v3:
 - pull in "mm/swap: Fix release_pages() when releasing devmap pages"
   and rebase the other patches on top of that
 - fold hmm_devmem_add_resource into the DEVICE_PUBLIC memory removal
   patch
 - remove _vm_normal_page as it isn't needed without DEVICE_PUBLIC memory
 - pick up various ACKs

Changes since v2:
 - fix the nvdimm kunit build
 - add a new memory type for device dax
 - fix a few issues in intermediate patches that didn't show up in the
   end result
 - incorporate feedback from Michal Hocko, including killing the
   DEVICE_PUBLIC memory type entirely

Changes since v1:
 - rebase
 - also switch p2pdma to the internal refcount
 - add type checking for pgmap->type
 - rename the migrate method to migrate_to_ram
 - clean up the altmap_valid flag
 - various tidbits from the reviews
====================

Conflicts resolved by:
 - keeping Ira's version of the code in swap.c
 - using the delete for the section in hmm.rst
 - using the delete for the devmap code in hmm.c and .h

* branch 'hmm-devmem-cleanup.4': (24 commits)
  mm: don't select MIGRATE_VMA_HELPER from HMM_MIRROR
  mm: remove the HMM config option
  mm: sort out the DEVICE_PRIVATE Kconfig mess
  mm: simplify ZONE_DEVICE page private data
  mm: remove hmm_devmem_add
  mm: remove hmm_vma_alloc_locked_page
  nouveau: use devm_memremap_pages directly
  nouveau: use alloc_page_vma directly
  PCI/P2PDMA: use the dev_pagemap internal refcount
  device-dax: use the dev_pagemap internal refcount
  memremap: provide an optional internal refcount in struct dev_pagemap
  memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag
  memremap: remove the data field in struct dev_pagemap
  memremap: add a migrate_to_ram method to struct dev_pagemap_ops
  memremap: lift the devmap_enable manipulation into devm_memremap_pages
  memremap: pass a struct dev_pagemap to ->kill and ->cleanup
  memremap: move dev_pagemap callbacks into a separate structure
  memremap: validate the pagemap type passed to devm_memremap_pages
  mm: factor out a devm_request_free_mem_region helper
  mm: export alloc_pages_vma
  ...

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
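For orientation, this is roughly the shape of the dev_pagemap interface the
series arrives at, reconstructed from the patch titles above rather than
copied from include/linux/memremap.h; treat member names and ordering beyond
those titles as illustrative:

/* Rough post-series sketch, pieced together from the patch titles above;
 * not a verbatim copy of include/linux/memremap.h. */

/* The callbacks move out of struct dev_pagemap into a separate ops table. */
struct dev_pagemap_ops {
	void (*page_free)(struct page *page);
	/* ->kill and ->cleanup now receive the pagemap itself ... */
	void (*kill)(struct dev_pagemap *pgmap);
	void (*cleanup)(struct dev_pagemap *pgmap);
	/* ... and the old device-private fault callback becomes
	 * migrate_to_ram, which must move the page back to system RAM. */
	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
};

/* The altmap_valid bool becomes a flag bit. */
#define PGMAP_ALTMAP_VALID	(1 << 0)

struct dev_pagemap {
	struct vmem_altmap altmap;
	struct percpu_ref *ref;		/* optional, caller-provided */
	struct percpu_ref internal_ref;	/* used when ->ref is left NULL */
	struct completion done;
	struct resource res;
	enum memory_type type;		/* validated by devm_memremap_pages() */
	unsigned int flags;		/* e.g. PGMAP_ALTMAP_VALID */
	const struct dev_pagemap_ops *ops;
	/* the old ->data cookie is gone; drivers embed the pagemap and
	 * use container_of() instead */
};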
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  49
1 file changed, 5 insertions(+), 44 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ddf20bd0c317..d437ccdb210c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -571,8 +571,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
* PFNMAP mappings in order to support COWable mappings.
*
*/
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte, bool with_public_device)
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
@@ -585,29 +585,6 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
return NULL;
if (is_zero_pfn(pfn))
return NULL;
-
- /*
- * Device public pages are special pages (they are ZONE_DEVICE
- * pages but different from persistent memory). They behave
- * allmost like normal pages. The difference is that they are
- * not on the lru and thus should never be involve with any-
- * thing that involve lru manipulation (mlock, numa balancing,
- * ...).
- *
- * This is why we still want to return NULL for such page from
- * vm_normal_page() so that we do not have to special case all
- * call site of vm_normal_page().
- */
- if (likely(pfn <= highest_memmap_pfn)) {
- struct page *page = pfn_to_page(pfn);
-
- if (is_device_public_page(page)) {
- if (with_public_device)
- return page;
- return NULL;
- }
- }
-
if (pte_devmap(pte))
return NULL;
@@ -797,17 +774,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
rss[mm_counter(page)]++;
} else if (pte_devmap(pte)) {
page = pte_page(pte);
-
- /*
- * Cache coherent device memory behave like regular page and
- * not like persistent memory page. For more informations see
- * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
- */
- if (is_device_public_page(page)) {
- get_page(page);
- page_dup_rmap(page, false);
- rss[mm_counter(page)]++;
- }
}
out_set_pte:
@@ -1063,7 +1029,7 @@ again:
if (pte_present(ptent)) {
struct page *page;
- page = _vm_normal_page(vma, addr, ptent, true);
+ page = vm_normal_page(vma, addr, ptent);
if (unlikely(details) && page) {
/*
* unmap_shared_mapping_pages() wants to
@@ -2782,13 +2748,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf->address);
} else if (is_device_private_entry(entry)) {
- /*
- * For un-addressable device memory we call the pgmap
- * fault handler callback. The callback must migrate
- * the page back to some CPU accessible page.
- */
- ret = device_private_entry_fault(vma, vmf->address, entry,
- vmf->flags, vmf->pmd);
+ vmf->page = device_private_entry_to_page(entry);
+ ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
} else {
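After the hunk above, do_swap_page() dispatches device-private faults directly
through pgmap->ops->migrate_to_ram() instead of the removed
device_private_entry_fault(). A minimal sketch of the driver side follows;
every example_* identifier is invented for illustration (nouveau's dmem code
is the real in-tree user of this pattern):

#include <linux/memremap.h>
#include <linux/mm.h>

/* Hypothetical device-private memory driver; all example_* names are
 * invented. The dev_pagemap is embedded so callbacks can recover the
 * driver state with container_of(), replacing the removed ->data field. */
struct example_dmem {
	struct dev_pagemap pagemap;
	void *vaddr;
};

/* Assumed driver helpers, not shown. */
int example_copy_to_ram(struct example_dmem *dmem, struct vm_fault *vmf);
void example_page_free(struct page *page);

static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
{
	struct example_dmem *dmem =
		container_of(vmf->page->pgmap, struct example_dmem, pagemap);

	/*
	 * vmf->page is the un-addressable device page looked up by
	 * do_swap_page() above; migrate it back to CPU-accessible
	 * memory, or fail the fault.
	 */
	if (example_copy_to_ram(dmem, vmf))
		return VM_FAULT_SIGBUS;
	return 0;
}

static const struct dev_pagemap_ops example_pagemap_ops = {
	.page_free	= example_page_free,
	.migrate_to_ram	= example_migrate_to_ram,
};

static int example_probe_dmem(struct device *dev, struct example_dmem *dmem,
			      unsigned long size)
{
	struct resource *res;

	/* Region-carving helper factored out by this series. */
	res = devm_request_free_mem_region(dev, &iomem_resource, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	dmem->pagemap.res = *res;
	dmem->pagemap.ops = &example_pagemap_ops;
	/* No ->ref supplied, so the new internal refcount is used. */
	dmem->vaddr = devm_memremap_pages(dev, &dmem->pagemap);
	return PTR_ERR_OR_ZERO(dmem->vaddr);
}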