summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlexandre Courbot <acourbot@nvidia.com>2014-06-03 20:01:39 +0900
committerAlexandre Courbot <acourbot@nvidia.com>2014-11-04 13:13:04 +0900
commit7ebb2059ef9a8f18672d1624871ccdc4a3ad840b (patch)
treee29c87dbda4afcba3de15eba46baa82d2d326454
parent636900724c32f8d5e896b508fdb18e31246a834e (diff)
downloadnouveau-7ebb2059ef9a8f18672d1624871ccdc4a3ad840b.tar.gz
drm/nouveau/vm: fix mapping of SG pages list
An SG pages list (not to be confused with a scatterlist) is a list of 4KB memory pages used to define a nouveau_mem. Mapping such a list into a VM that does not use 4KB apertures resulted in each subsequent 4KB physical page being mapped into a larger VM aperture, thus creating an incorrect, overlapping mapping. This patch fixes the issue by detecting when such mappings occur and skipping the required number of pages in the list to ensure a correct linear mapping. Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
-rw-r--r--nvkm/subdev/vm/base.c9
1 file changed, 7 insertions, 2 deletions
diff --git a/nvkm/subdev/vm/base.c b/nvkm/subdev/vm/base.c
index f75a683bd..583463cfe 100644
--- a/nvkm/subdev/vm/base.c
+++ b/nvkm/subdev/vm/base.c
@@ -152,13 +152,18 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
end = (pte + num);
if (unlikely(end >= max))
end = max;
- len = end - pte;
+
+ /*
+ * Map pages >4KB one by one so we can fix the list pointer
+ * as to not map intermediate pages to the next PTE
+ */
+ len = bits ? 1 : end - pte;
vmm->map_sg(vma, pgt, mem, pte, len, list);
num -= len;
pte += len;
- list += len;
+ list += len << bits;
if (unlikely(end >= max)) {
pde++;
pte = 0;