author     Jason Gunthorpe <jgg@mellanox.com>    2019-07-02 15:07:52 -0300
committer  Jason Gunthorpe <jgg@mellanox.com>    2019-07-02 15:10:45 -0300
commit     cc5dfd59e375f4d0f2b64643723d16b38b2f2d78 (patch)
tree       0a8f526169ee889d6af4e7679122c946773ec33a /mm/migrate.c
parent     9ec3f4cb35bc8278f0582fed9f9229c9315c2ffb (diff)
parent     b6b346a0665a8bf8b28fd851217c435a3eec4af9 (diff)
download   linux-cc5dfd59e375f4d0f2b64643723d16b38b2f2d78.tar.bz2
Merge branch 'hmm-devmem-cleanup.4' into rdma.git hmm
Christoph Hellwig says:
====================
Below is a series that cleans up the dev_pagemap interface so that it is
more easily usable, which removes the need to wrap it in hmm and thus
allows a lot of code to be removed (an illustrative sketch of the
resulting interface follows the quoted changelog below).
Changes since v3:
- pull in "mm/swap: Fix release_pages() when releasing devmap pages" and
rebase the other patches on top of that
- fold the hmm_devmem_add_resource into the DEVICE_PUBLIC memory removal
patch
- remove _vm_normal_page as it isn't needed without DEVICE_PUBLIC memory
- pick up various ACKs
Changes since v2:
- fix nvdimm kunit build
- add a new memory type for device dax
- fix a few issues in intermediate patches that didn't show up in the end
result
- incorporate feedback from Michal Hocko, including killing of
the DEVICE_PUBLIC memory type entirely
Changes since v1:
- rebase
- also switch p2pdma to the internal refcount
- add type checking for pgmap->type
- rename the migrate method to migrate_to_ram
- cleanup the altmap_valid flag
- various tidbits from the reviews
====================
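For orientation, here is a minimal sketch of what registering device-private
memory looks like once the series is applied. It is a hedged illustration,
not code from the series: the mydev_* names, the size parameter, and the
helper itself are hypothetical; only the dev_pagemap plumbing (type, ops,
devm_request_free_mem_region, devm_memremap_pages) comes from the kernel API
this series converges on.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>
#include <linux/mm.h>

/* Hypothetical driver callbacks; only the dev_pagemap plumbing is real. */
static void mydev_page_free(struct page *page)
{
	/* Hand the backing device memory back to the driver's allocator. */
}

static vm_fault_t mydev_migrate_to_ram(struct vm_fault *vmf)
{
	/* Migrate the faulting device-private page back to system RAM. */
	return VM_FAULT_SIGBUS;
}

static const struct dev_pagemap_ops mydev_pagemap_ops = {
	.page_free	= mydev_page_free,
	.migrate_to_ram	= mydev_migrate_to_ram,
};

static int mydev_register_private_memory(struct device *dev,
					 struct dev_pagemap *pgmap,
					 unsigned long size)
{
	struct resource *res;
	void *addr;

	/* Carve an unused physical address range out of iomem_resource. */
	res = devm_request_free_mem_region(dev, &iomem_resource, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->res = *res;
	pgmap->ops = &mydev_pagemap_ops;
	/*
	 * pgmap->ref is left NULL, so devm_memremap_pages() falls back to
	 * the internal refcount added by this series and the driver needs
	 * no ->kill or ->cleanup callbacks.
	 */
	addr = devm_memremap_pages(dev, pgmap);
	return IS_ERR(addr) ? PTR_ERR(addr) : 0;
}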
Conflicts resolved by:
- Keeping Ira's version of the code in swap.c
- Taking the deletion of the section in hmm.rst
- Taking the deletion of the devmap code in hmm.c and hmm.h
* branch 'hmm-devmem-cleanup.4': (24 commits)
mm: don't select MIGRATE_VMA_HELPER from HMM_MIRROR
mm: remove the HMM config option
mm: sort out the DEVICE_PRIVATE Kconfig mess
mm: simplify ZONE_DEVICE page private data
mm: remove hmm_devmem_add
mm: remove hmm_vma_alloc_locked_page
nouveau: use devm_memremap_pages directly
nouveau: use alloc_page_vma directly
PCI/P2PDMA: use the dev_pagemap internal refcount
device-dax: use the dev_pagemap internal refcount
memremap: provide an optional internal refcount in struct dev_pagemap
memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag
memremap: remove the data field in struct dev_pagemap
memremap: add a migrate_to_ram method to struct dev_pagemap_ops
memremap: lift the devmap_enable manipulation into devm_memremap_pages
memremap: pass a struct dev_pagemap to ->kill and ->cleanup
memremap: move dev_pagemap callbacks into a separate structure
memremap: validate the pagemap type passed to devm_memremap_pages
mm: factor out a devm_request_free_mem_region helper
mm: export alloc_pages_vma
...
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
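Two of the shortlog entries above change how a driver describes a
pre-reserved memmap: the boolean altmap_valid field in struct dev_pagemap
is replaced by a flag bit. As a small hedged illustration, assuming a
hypothetical mydev_set_altmap() helper and driver-owned pgmap/altmap state:

#include <linux/memremap.h>

/*
 * Hypothetical helper showing the flag that replaces the old
 * pgmap->altmap_valid boolean.  The altmap describes struct pages that
 * are carved out of the device memory itself.
 */
static void mydev_set_altmap(struct dev_pagemap *pgmap,
			     const struct vmem_altmap *altmap)
{
	pgmap->altmap = *altmap;
	/* Was "pgmap->altmap_valid = true;" before this series. */
	pgmap->flags |= PGMAP_ALTMAP_VALID;
}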
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--   mm/migrate.c   28
1 file changed, 4 insertions(+), 24 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index f2ecc2855a12..78d45e184457 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -246,8 +246,6 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 			if (is_device_private_page(new)) {
 				entry = make_device_private_entry(new, pte_write(pte));
 				pte = swp_entry_to_pte(entry);
-			} else if (is_device_public_page(new)) {
-				pte = pte_mkdevmap(pte);
 			}
 		}
 
@@ -381,7 +379,6 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
 	 * ZONE_DEVICE pages.
 	 */
 	expected_count += is_device_private_page(page);
-	expected_count += is_device_public_page(page);
 	if (mapping)
 		expected_count += hpage_nr_pages(page) + page_has_private(page);
 
@@ -994,10 +991,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		if (!PageMappingFlags(page))
 			page->mapping = NULL;
 
-		if (unlikely(is_zone_device_page(newpage))) {
-			if (is_device_public_page(newpage))
-				flush_dcache_page(newpage);
-		} else
+		if (likely(!is_zone_device_page(newpage)))
 			flush_dcache_page(newpage);
 	}
 
@@ -2265,7 +2259,7 @@ again:
 				pfn = 0;
 				goto next;
 			}
-			page = _vm_normal_page(migrate->vma, addr, pte, true);
+			page = vm_normal_page(migrate->vma, addr, pte);
 			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
 			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
 		}
@@ -2406,16 +2400,7 @@ static bool migrate_vma_check_page(struct page *page)
 		 * FIXME proper solution is to rework migration_entry_wait() so
 		 * it does not need to take a reference on page.
 		 */
-		if (is_device_private_page(page))
-			return true;
-
-		/*
-		 * Only allow device public page to be migrated and account for
-		 * the extra reference count imply by ZONE_DEVICE pages.
-		 */
-		if (!is_device_public_page(page))
-			return false;
-		extra++;
+		return is_device_private_page(page);
 	}
 
 	/* For file back page */
@@ -2665,11 +2650,6 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 
 			swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
 			entry = swp_entry_to_pte(swp_entry);
-		} else if (is_device_public_page(page)) {
-			entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
-			if (vma->vm_flags & VM_WRITE)
-				entry = pte_mkwrite(pte_mkdirty(entry));
-			entry = pte_mkdevmap(entry);
 		}
 	} else {
 		entry = mk_pte(page, vma->vm_page_prot);
@@ -2789,7 +2769,7 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
 					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
 					continue;
 				}
-			} else if (!is_device_public_page(newpage)) {
+			} else {
 				/*
 				 * Other types of ZONE_DEVICE page are not
 				 * supported.
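The device-private path that survives this diff is driven from the fault
side: when a CPU touches a MEMORY_DEVICE_PRIVATE page, the pgmap's
migrate_to_ram() callback (renamed by this series) is invoked and is
expected to move the data back to system RAM. Below is a minimal, hedged
skeleton of such a callback; the mydev_* naming is hypothetical and the
actual migration work is left as comments.

#include <linux/memremap.h>
#include <linux/mm.h>

/*
 * Hypothetical CPU-fault handler for a device-private page.  do_swap_page()
 * reaches this through page->pgmap->ops->migrate_to_ram() when a CPU
 * dereferences a MEMORY_DEVICE_PRIVATE mapping.
 */
static vm_fault_t mydev_migrate_to_ram(struct vm_fault *vmf)
{
	struct page *dpage = vmf->page;		/* the device-private page */

	/*
	 * A real driver would allocate a system RAM page, copy the data
	 * back from device memory, and replace the device-private entry
	 * with a normal PTE, typically via the migrate_vma machinery that
	 * the diff above simplifies.  Returning VM_FAULT_SIGBUS marks this
	 * sketch as intentionally incomplete.
	 */
	(void)dpage;
	return VM_FAULT_SIGBUS;
}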