author		Linus Torvalds <torvalds@linux-foundation.org>	2020-08-05 13:28:50 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-05 13:28:50 -0700
commit		fffe3ae0ee84e25d2befe2ae59bc32aa2b6bc77b (patch)
tree		80db9b520298091787d70772530f51b90afb2709 /mm
parent		8f7be6291529011a58856bf178f52ed5751c68ac (diff)
parent		7d17e83abec1be3355260b3e4812044c65c32907 (diff)
download	linux-fffe3ae0ee84e25d2befe2ae59bc32aa2b6bc77b.tar.gz
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull hmm updates from Jason Gunthorpe:
 "Ralph has been working on nouveau's use of hmm_range_fault() and
  migrate_vma() which resulted in this small series. It adds reporting
  of the page table order from hmm_range_fault() and some optimization
  of migrate_vma():

   - Report the size of the page table mapping out of hmm_range_fault().
     This makes it easier to establish a large/huge/etc mapping in the
     device's page table.

   - Allow devices to ignore the invalidations during migration in cases
     where the migration is not going to change pages. For instance
     migrating pages to a device does not require the device to
     invalidate pages already in the device.

   - Update nouveau and hmm_tests to use the above"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  mm/hmm/test: use the new migration invalidation
  nouveau/svm: use the new migration invalidation
  mm/notifier: add migration invalidation type
  mm/migrate: add a flags parameter to migrate_vma
  nouveau: fix storing invalid ptes
  nouveau/hmm: support mapping large sysmem pages
  nouveau: fix mapping 2MB sysmem pages
  nouveau/hmm: fault one page at a time
  mm/hmm: add tests for hmm_pfn_to_map_order()
  mm/hmm: provide the page mapping order in hmm_range_fault()
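As a sketch of the first item: after this series, a driver fault handler built
on hmm_range_fault() can consult hmm_pfn_to_map_order() to establish one large
device mapping instead of many 4K ones. The snippet below is illustrative
only: my_device_map() is a hypothetical driver hook, and the mmap locking and
mmu_interval_notifier retry loop that must surround hmm_range_fault() are
elided.

#include <linux/hmm.h>

static int my_fault_range(struct hmm_range *range)
{
        unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
        unsigned long i;
        int ret;

        ret = hmm_range_fault(range);   /* fills range->hmm_pfns[] */
        if (ret)
                return ret;

        for (i = 0; i < npages; ) {
                unsigned long hmm_pfn = range->hmm_pfns[i];
                /* order of the CPU page table entry backing this pfn */
                unsigned int order = hmm_pfn_to_map_order(hmm_pfn);

                if (hmm_pfn & HMM_PFN_VALID)
                        my_device_map(range->start + (i << PAGE_SHIFT),
                                      hmm_pfn_to_page(hmm_pfn), order,
                                      hmm_pfn & HMM_PFN_WRITE);
                /*
                 * Large mappings are order aligned and every pfn within
                 * them carries the same order, so the whole chunk can be
                 * consumed at once.
                 */
                i += 1UL << order;
        }
        return 0;
}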
Diffstat (limited to 'mm')
-rw-r--r--	mm/hmm.c	16
-rw-r--r--	mm/migrate.c	14
2 files changed, 24 insertions(+), 6 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index e9a545751108..0809baee49d0 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -165,12 +165,19 @@ static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
 	return hmm_pfns_fill(addr, end, range, 0);
 }
 
+static inline unsigned long hmm_pfn_flags_order(unsigned long order)
+{
+	return order << HMM_PFN_ORDER_SHIFT;
+}
+
 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
 						 pmd_t pmd)
 {
 	if (pmd_protnone(pmd))
 		return 0;
-	return pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
+	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
+				 HMM_PFN_VALID) |
+	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -389,7 +396,9 @@ static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
 {
 	if (!pud_present(pud))
 		return 0;
-	return pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
+	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
+				 HMM_PFN_VALID) |
+	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
 }
 
 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -474,7 +483,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 
 	i = (start - range->start) >> PAGE_SHIFT;
 	pfn_req_flags = range->hmm_pfns[i];
-	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
+		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
 	required_fault =
 		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
 	if (required_fault) {
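The read side of the order encoded by hmm_pfn_flags_order() in the hunks above
is hmm_pfn_to_map_order(), added to include/linux/hmm.h elsewhere in this
series; it is roughly:

static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
{
        return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
}

PMD_SHIFT - PAGE_SHIFT is 9 on x86-64 (a 2MB huge page) and PUD_SHIFT -
PAGE_SHIFT is 18 (1GB), so five order bits are plenty.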
diff --git a/mm/migrate.c b/mm/migrate.c
index 40cd7016ae6f..4fcc465736ff 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2276,7 +2276,9 @@ again:
 			goto next;
 
 		page = device_private_entry_to_page(entry);
-		if (page->pgmap->owner != migrate->src_owner)
+		if (!(migrate->flags &
+		      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
+		    page->pgmap->owner != migrate->pgmap_owner)
 			goto next;
 
 		mpfn = migrate_pfn(page_to_pfn(page)) |
@@ -2284,7 +2286,7 @@ again:
 		if (is_write_device_private_entry(entry))
 			mpfn |= MIGRATE_PFN_WRITE;
 	} else {
-		if (migrate->src_owner)
+		if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
 			goto next;
 		pfn = pte_pfn(pte);
 		if (is_zero_pfn(pfn)) {
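The two hunks above are where the new selection flags take effect during page
collection. A driver migrating its own device private pages back to system
memory would set up the call along these lines; the struct fields and
MIGRATE_VMA_SELECT_* values come from this series, while mdev, src_pfns and
dst_pfns stand in for driver state:

struct migrate_vma args = {
        .vma            = vma,
        .start          = start,
        .end            = end,
        .src            = src_pfns,
        .dst            = dst_pfns,
        /* compared against page->pgmap->owner in the first hunk above */
        .pgmap_owner    = mdev,
        .flags          = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
};

ret = migrate_vma_setup(&args);

A migration of ordinary anonymous pages toward the device would pass
MIGRATE_VMA_SELECT_SYSTEM instead, which is what the second hunk tests for.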
@@ -2379,8 +2381,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
 {
 	struct mmu_notifier_range range;
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL,
+	/*
+	 * Note that the pgmap_owner is passed to the mmu notifier callback so
+	 * that the registered device driver can skip invalidating device
+	 * private page mappings that won't be migrated.
+	 */
+	mmu_notifier_range_init(&range, MMU_NOTIFY_MIGRATE, 0, migrate->vma,
 			migrate->vma->vm_mm, migrate->start, migrate->end);
+	range.migrate_pgmap_owner = migrate->pgmap_owner;
 	mmu_notifier_invalidate_range_start(&range);
 
 	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
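On the driver side, the new event type and owner field let an mmu interval
notifier skip the invalidation that migrate_vma_collect() issues, exactly as
the comment added above describes. A callback modelled on the hmm_test and
nouveau updates in this series might look like this; my_device, its mutex and
the mapping teardown are placeholders:

static bool my_invalidate(struct mmu_interval_notifier *mni,
                          const struct mmu_notifier_range *range,
                          unsigned long cur_seq)
{
        struct my_device *mdev =
                container_of(mni, struct my_device, notifier);

        /*
         * A migration we initiated ourselves does not change pages the
         * device already owns, so the invalidation can be ignored.
         */
        if (range->event == MMU_NOTIFY_MIGRATE &&
            range->migrate_pgmap_owner == mdev)
                return true;

        if (mmu_notifier_range_blockable(range))
                mutex_lock(&mdev->mutex);
        else if (!mutex_trylock(&mdev->mutex))
                return false;

        mmu_interval_set_seq(mni, cur_seq);
        /* ... tear down the device mapping for range->start..range->end ... */
        mutex_unlock(&mdev->mutex);
        return true;
}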