summaryrefslogtreecommitdiff
path: root/src/util/u_queue.c
diff options
context:
space:
mode:
authorNicolai Hähnle <nicolai.haehnle@amd.com>2017-10-22 17:38:41 +0200
committerNicolai Hähnle <nicolai.haehnle@amd.com>2017-11-09 11:53:19 +0100
commit185061aef41401bead9e5d15aa00ffa2fcf7ef8c (patch)
treec04fd6225648d34f5ffd5c943162c47d5109b712 /src/util/u_queue.c
parentf0d3a4de75fdb865c058aba8614f0fe6ba5f0969 (diff)
downloadmesa-185061aef41401bead9e5d15aa00ffa2fcf7ef8c.tar.gz
u_queue: add util_queue_finish for waiting for previously added jobs
Schedule one job for every thread, and wait on a barrier inside the job execution function. v2: avoid alloca (fixes Windows build error) Reviewed-by: Marek Olšák <marek.olsak@amd.com> (v1)
Diffstat (limited to 'src/util/u_queue.c')
-rw-r--r--src/util/u_queue.c35
1 file changed, 35 insertions, 0 deletions
diff --git a/src/util/u_queue.c b/src/util/u_queue.c
index 8293ec661b0..706ee8b04d9 100644
--- a/src/util/u_queue.c
+++ b/src/util/u_queue.c
@@ -25,7 +25,9 @@
*/
#include "u_queue.h"
+
#include "util/u_string.h"
+#include "util/u_thread.h"
static void util_queue_killall_and_wait(struct util_queue *queue);
@@ -429,6 +431,39 @@ util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
util_queue_fence_wait(fence);
}
+static void
+util_queue_finish_execute(void *data, int num_thread)
+{
+ util_barrier *barrier = data;
+ util_barrier_wait(barrier);
+}
+
+/**
+ * Wait until all previously added jobs have completed.
+ */
+void
+util_queue_finish(struct util_queue *queue)
+{
+ util_barrier barrier;
+ struct util_queue_fence *fences = malloc(queue->num_threads * sizeof(*fences));
+
+ util_barrier_init(&barrier, queue->num_threads);
+
+ for (unsigned i = 0; i < queue->num_threads; ++i) {
+ util_queue_fence_init(&fences[i]);
+ util_queue_add_job(queue, &barrier, &fences[i], util_queue_finish_execute, NULL);
+ }
+
+ for (unsigned i = 0; i < queue->num_threads; ++i) {
+ util_queue_fence_wait(&fences[i]);
+ util_queue_fence_destroy(&fences[i]);
+ }
+
+ util_barrier_destroy(&barrier);
+
+ free(fences);
+}
+
int64_t
util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
{