author    Alexandre Courbot <acourbot@nvidia.com>  2014-07-07 18:19:50 +0900
committer Alexandre Courbot <acourbot@nvidia.com>  2014-10-31 11:47:40 +0900
commit    636900724c32f8d5e896b508fdb18e31246a834e (patch)
tree      10e4186883bacd7819b3d8a1e82affe69fcf1225
parent    cb01cb23aeaede63bd40b627d6d0bee7eabd0b67 (diff)
download  nouveau-636900724c32f8d5e896b508fdb18e31246a834e.tar.gz
drm: synchronize BOs when required
On architectures where access to GPU memory is non-coherent, caches need to be flushed and invalidated explicitly when control of a BO passes between the CPU and the GPU. This patch adds buffer synchronization functions that invoke the correct API (PCI or DMA) to ensure synchronization is effective.

Based on the TTM DMA cache helper patches by Lucas Stach.

Signed-off-by: Lucas Stach <dev@lynxeye.de>
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
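As context for the patch below, here is a minimal sketch of the streaming DMA API handover pattern these helpers build on. This is generic kernel DMA API usage, not nouveau code; dev, addr and len are placeholders for a device and an already-mapped buffer.

/* Sketch only: ownership of a DMA buffer alternates between CPU and
 * device, with an explicit cache sync at each handover. */
#include <linux/dma-mapping.h>

static void dma_handover_example(struct device *dev, dma_addr_t addr,
				 size_t len)
{
	/* CPU writes are done: flush CPU caches so the device sees them. */
	dma_sync_single_for_device(dev, addr, len, DMA_TO_DEVICE);

	/* ... device DMA runs here ... */

	/* Device writes are done: invalidate CPU caches before reading. */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
}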
-rw-r--r--  drm/nouveau_bo.c  | 42
-rw-r--r--  drm/nouveau_bo.h  |  2
-rw-r--r--  drm/nouveau_gem.c | 12
3 files changed, 56 insertions(+), 0 deletions(-)
diff --git a/drm/nouveau_bo.c b/drm/nouveau_bo.c
index ed9a6946f..d2a4768e3 100644
--- a/drm/nouveau_bo.c
+++ b/drm/nouveau_bo.c
@@ -426,6 +426,46 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
ttm_bo_kunmap(&nvbo->kmap);
}
+void
+nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_device *device = nvkm_device(&drm->device);
+ struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ int i;
+
+ if (!ttm_dma)
+ return;
+
+ /* Don't waste time looping if the object is coherent */
+ if (nvbo->force_coherent)
+ return;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ dma_sync_single_for_device(nv_device_base(device),
+ ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+}
+
+void
+nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_device *device = nvkm_device(&drm->device);
+ struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ int i;
+
+ if (!ttm_dma)
+ return;
+
+ /* Don't waste time looping if the object is coherent */
+ if (nvbo->force_coherent)
+ return;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ dma_sync_single_for_cpu(nv_device_base(device),
+ ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+}
+
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
@@ -437,6 +477,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
+ nouveau_bo_sync_for_device(nvbo);
+
return 0;
}
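For illustration, a hedged sketch of how a kernel-side caller might bracket CPU access with the two new helpers. nouveau_bo_map(), nouveau_bo_wr32() and nouveau_bo_unmap() are existing nouveau helpers, but this exact flow is an assumption for illustration, not part of the patch.

/* Illustrative only: pair the new sync helpers around CPU access to a
 * kernel mapping of a BO on a non-coherent platform. */
static int bo_cpu_fill_example(struct nouveau_bo *nvbo, u32 value)
{
	unsigned int i;
	int ret;

	ret = nouveau_bo_map(nvbo);		/* kmap the BO */
	if (ret)
		return ret;

	nouveau_bo_sync_for_cpu(nvbo);		/* drop stale CPU cachelines */

	for (i = 0; i < nvbo->bo.mem.size / 4; i++)
		nouveau_bo_wr32(nvbo, i, value);

	nouveau_bo_sync_for_device(nvbo);	/* flush writes for the GPU */
	nouveau_bo_unmap(nvbo);
	return 0;
}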
diff --git a/drm/nouveau_bo.h b/drm/nouveau_bo.h
index 0f8bbd48a..c827f233e 100644
--- a/drm/nouveau_bo.h
+++ b/drm/nouveau_bo.h
@@ -85,6 +85,8 @@ void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
+void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
+void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drm/nouveau_gem.c b/drm/nouveau_gem.c
index 36951ee4b..42c34babc 100644
--- a/drm/nouveau_gem.c
+++ b/drm/nouveau_gem.c
@@ -867,6 +867,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
else
ret = lret;
}
+ nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_unreference_unlocked(gem);
return ret;
@@ -876,6 +877,17 @@ int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_nouveau_gem_cpu_fini *req = data;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return -ENOENT;
+ nvbo = nouveau_gem_object(gem);
+
+ nouveau_bo_sync_for_device(nvbo);
+ drm_gem_object_unreference_unlocked(gem);
return 0;
}
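Userspace reaches these paths through the existing cpu_prep/cpu_fini ioctls. A hedged sketch follows (error handling elided; the ioctl numbers, struct names and the NOUVEAU_GEM_CPU_PREP_WRITE flag come from the nouveau uapi header, while the surrounding function is assumed for illustration):

/* Sketch: bracket CPU access to a mapped BO so the kernel can
 * invalidate (CPU_PREP) and flush (CPU_FINI) caches as needed. */
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>

static void cpu_write_example(int fd, uint32_t handle, void *map, size_t len)
{
	struct drm_nouveau_gem_cpu_prep prep = {
		.handle = handle,
		.flags  = NOUVEAU_GEM_CPU_PREP_WRITE,
	};
	struct drm_nouveau_gem_cpu_fini fini = { .handle = handle };

	/* Waits for the GPU, then syncs the BO for CPU access. */
	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_PREP, &prep);

	memset(map, 0, len);	/* CPU writes through the mapping */

	/* Syncs the BO back for device access. */
	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_FINI, &fini);
}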