 drivers/gpu/drm/vc4/vc4_bo.c  | 35 ++++++++++++++++++++++++++++-------
 drivers/gpu/drm/vc4/vc4_drv.h |  3 +++
 drivers/gpu/drm/vc4/vc4_gem.c | 32 +++++++++++++++++++-------------
 3 files changed, 50 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index b6169d3a93c1..e4f14aa8ca9f 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -125,6 +125,7 @@ vc4_bo_create(struct drm_device *dev, size_t unaligned_size)
 	uint32_t size = roundup(unaligned_size, PAGE_SIZE);
 	uint32_t page_index = bo_page_index(size);
 	struct drm_gem_cma_object *cma_obj;
+	int pass;
 
 	if (size == 0)
 		return NULL;
@@ -142,15 +143,35 @@ vc4_bo_create(struct drm_device *dev, size_t unaligned_size)
 	}
 
 	/* Otherwise, make a new BO. */
-	cma_obj = drm_gem_cma_create(dev, size);
-	if (IS_ERR(cma_obj)) {
-		/* If we've run out of CMA memory, kill the cache of
-		 * CMA allocations we've got laying around and try again.
-		 */
-		vc4_bo_cache_purge(dev);
+	for (pass = 0; ; pass++) {
 		cma_obj = drm_gem_cma_create(dev, size);
-		if (IS_ERR(cma_obj))
+		if (!IS_ERR(cma_obj))
+			break;
+
+		switch (pass) {
+		case 0:
+			/*
+			 * If we've run out of CMA memory, kill the cache of
+			 * CMA allocations we've got laying around and try again.
+			 */
+			vc4_bo_cache_purge(dev);
+			break;
+		case 1:
+			/*
+			 * Getting desperate, so try to wait for any
+			 * previous rendering to finish, free its
+			 * unreferenced BOs to the cache, and then
+			 * free the cache.
+			 */
+			vc4_wait_for_seqno(dev, vc4->emit_seqno, ~0ull);
+			vc4_job_handle_completed(vc4);
+			vc4_bo_cache_purge(dev);
+			break;
+		case 3:
+			DRM_ERROR("Failed to allocate from CMA:\n");
+			vc4_bo_stats_dump();
 			return NULL;
+		}
 	}
 
 	bo_stats.num_allocated++;
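
For readers skimming the hunk above, here is a minimal userspace sketch of the escalating-retry pattern it introduces. The helpers try_alloc(), purge_cache(), and wait_for_rendering() are hypothetical stand-ins for drm_gem_cma_create(), vc4_bo_cache_purge(), and the vc4_wait_for_seqno()/vc4_job_handle_completed() pair; this illustrates the control flow under those assumptions, it is not driver code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver calls; not real DRM API. */
static void *try_alloc(size_t size) { return malloc(size); } /* drm_gem_cma_create() */
static void purge_cache(void) { }        /* vc4_bo_cache_purge() */
static void wait_for_rendering(void) { } /* vc4_wait_for_seqno() + vc4_job_handle_completed() */

static void *alloc_with_fallback(size_t size)
{
	int pass;
	void *ptr;

	for (pass = 0; ; pass++) {
		ptr = try_alloc(size);
		if (ptr)
			return ptr;

		switch (pass) {
		case 0:
			/* Cheapest recovery first: drop cached allocations. */
			purge_cache();
			break;
		case 1:
			/* Costlier: drain in-flight rendering, then purge again. */
			wait_for_rendering();
			purge_cache();
			break;
		case 3:
			/* No case 2: pass 2 gets one bare retry, then give up. */
			fprintf(stderr, "allocation of %zu bytes failed\n", size);
			return NULL;
		}
	}
}

int main(void)
{
	void *p = alloc_with_fallback(4096);

	free(p);
	return p ? 0 : 1;
}

Note that the patch's switch has no case 2, so pass 2 falls through the switch and simply retries once more before the case 3 bail-out.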
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 6b3e07cf8443..bc4384e3d4cb 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -387,6 +387,9 @@ int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
 int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
 void vc4_submit_next_job(struct drm_device *dev);
+int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
+		       uint64_t timeout_ns);
+void vc4_job_handle_completed(struct vc4_dev *vc4);
 
 /* vc4_hdmi.c */
 void vc4_hdmi_register(void);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 7bb446cc1f40..54fe0b83421b 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -117,7 +117,7 @@ submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
 	barrier();
 }
 
-static int
+int
 vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -477,21 +477,11 @@ vc4_complete_exec(struct vc4_exec_info *exec)
 	kfree(exec);
 }
 
-/* Scheduled when any job has been completed, this walks the list of
- * jobs that had completed and unrefs their BOs and frees their exec
- * structs.
- */
-static void
-vc4_job_done_work(struct work_struct *work)
+void
+vc4_job_handle_completed(struct vc4_dev *vc4)
 {
-	struct vc4_dev *vc4 =
-		container_of(work, struct vc4_dev, job_done_work);
-	struct drm_device *dev = vc4->dev;
 	unsigned long irqflags;
 
-	/* Need the struct lock for drm_gem_object_unreference(). */
-	mutex_lock(&dev->struct_mutex);
-
 	spin_lock_irqsave(&vc4->job_lock, irqflags);
 	while (!list_empty(&vc4->job_done_list)) {
 		struct vc4_exec_info *exec =
@@ -505,6 +495,22 @@ vc4_job_done_work(struct work_struct *work)
 	}
 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+/* Scheduled when any job has been completed, this walks the list of
+ * jobs that had completed and unrefs their BOs and frees their exec
+ * structs.
+ */
+static void
+vc4_job_done_work(struct work_struct *work)
+{
+	struct vc4_dev *vc4 =
+		container_of(work, struct vc4_dev, job_done_work);
+	struct drm_device *dev = vc4->dev;
+
+	/* Need the struct lock for drm_gem_object_unreference(). */
+	mutex_lock(&dev->struct_mutex);
+	vc4_job_handle_completed(vc4);
 
 	mutex_unlock(&dev->struct_mutex);
 }
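
The vc4_gem.c change is an extract-function refactor: the list-draining loop moves into vc4_job_handle_completed(), which per the patch's locking comment runs under dev->struct_mutex, while vc4_job_done_work() shrinks to taking that mutex and calling the new helper. This is what lets the allocator above drain completed jobs synchronously instead of waiting for the workqueue. Below is a minimal userspace analogue of that lock split, with pthread mutexes standing in for dev->struct_mutex and vc4->job_lock; the names here are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* dev->struct_mutex */
static pthread_mutex_t job_lock = PTHREAD_MUTEX_INITIALIZER; /* vc4->job_lock */
static int done_jobs;                                        /* stand-in for vc4->job_done_list */

/* Core helper: caller must hold big_lock, like vc4_job_handle_completed(). */
static void handle_completed_locked(void)
{
	pthread_mutex_lock(&job_lock);
	while (done_jobs > 0) {
		done_jobs--;

		/* Drop job_lock around the teardown, as the driver does
		 * around vc4_complete_exec(). */
		pthread_mutex_unlock(&job_lock);
		puts("freeing one completed job");
		pthread_mutex_lock(&job_lock);
	}
	pthread_mutex_unlock(&job_lock);
}

/* Deferred-work entry point, like vc4_job_done_work(): takes the big
 * lock itself, then runs the shared core. */
static void job_done_work(void)
{
	pthread_mutex_lock(&big_lock);
	handle_completed_locked();
	pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	done_jobs = 2;
	job_done_work();               /* the workqueue path */

	done_jobs = 1;
	pthread_mutex_lock(&big_lock); /* a caller already holding the lock... */
	handle_completed_locked();     /* ...can drain completed jobs directly */
	pthread_mutex_unlock(&big_lock);
	return 0;
}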