author     Mark Janes <mark.a.janes@intel.com>  2019-07-10 16:57:16 -0700
committer  Mark Janes <mark.a.janes@intel.com>  2019-08-07 21:33:56 -0700
commit     deea3798b6e7bc54e62cd6a7b1c08c4e51801144 (patch)
tree       76fdf3ae9c8e798ecbcb2efcdb3fd5dedb99b3aa /src/mesa
parent     1f4f421ce07e16d7bbef16c016fbc1d5b2055516 (diff)
download   mesa-deea3798b6e7bc54e62cd6a7b1c08c4e51801144.tar.gz
intel/perf: make perf context private
Encapsulate the details of this data structure.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Diffstat (limited to 'src/mesa')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_context.c             1
-rw-r--r--  src/mesa/drivers/dri/i965/brw_context.h             2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_performance_query.c  63
3 files changed, 27 insertions, 39 deletions
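
The commit replaces the gen_perf_context struct embedded in brw_context with an opaque pointer, so i965 no longer reaches into fields such as perf_ctx->perf or perf_ctx->n_active_oa_queries and instead goes through accessors visible in the hunks below (gen_perf_new_context(), gen_perf_config(), gen_perf_active_queries()). A minimal sketch of that opaque-pointer pattern follows; the widget_* names and the struct layout are made up for illustration and only the gen_perf_* names in the comments come from the diff.

/* widget.h (illustrative): the public header only forward-declares the
 * type, so callers cannot touch its fields directly. */
struct widget;                                    /* opaque, like gen_perf_context */

struct widget *widget_new(void *owner);           /* cf. gen_perf_new_context() */
int widget_active_count(const struct widget *w);  /* cf. gen_perf_active_queries() */

/* widget.c (illustrative): the only translation unit that knows the layout. */
#include <stdlib.h>

struct widget {
   void *owner;
   int active;
};

struct widget *
widget_new(void *owner)
{
   struct widget *w = calloc(1, sizeof(*w));
   if (w)
      w->owner = owner;
   return w;
}

int
widget_active_count(const struct widget *w)
{
   return w->active;
}

With the definition hidden, the driver keeps only a struct gen_perf_context * member (see the brw_context.h hunk below), and the perf code can change its internals without touching i965.
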
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
index 957be4006f8..d4bbe0b2b6f 100644
--- a/src/mesa/drivers/dri/i965/brw_context.c
+++ b/src/mesa/drivers/dri/i965/brw_context.c
@@ -964,6 +964,7 @@ brwCreateContext(gl_api api,
*dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
return false;
}
+ brw->perf_ctx = gen_perf_new_context(brw);
driContextPriv->driverPrivate = brw;
brw->driContext = driContextPriv;
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index bf720bfa172..2ac443bf032 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -1162,7 +1162,7 @@ struct brw_context
bool supported;
} predicate;
- struct gen_perf_context perf_ctx;
+ struct gen_perf_context *perf_ctx;
int num_atoms[BRW_NUM_PIPELINES];
const struct brw_tracked_state render_atoms[76];
diff --git a/src/mesa/drivers/dri/i965/brw_performance_query.c b/src/mesa/drivers/dri/i965/brw_performance_query.c
index 80a2ba6f4b4..f7016e4dd15 100644
--- a/src/mesa/drivers/dri/i965/brw_performance_query.c
+++ b/src/mesa/drivers/dri/i965/brw_performance_query.c
@@ -112,7 +112,7 @@ static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
struct brw_context *ctx = brw_void;
- struct gen_perf_context *perf_ctx = &ctx->perf_ctx;
+ struct gen_perf_context *perf_ctx = ctx->perf_ctx;
struct gl_perf_query_object *o = query_void;
struct brw_perf_query_object * brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
@@ -128,7 +128,7 @@ static void
dump_perf_queries(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
- gen_perf_dump_query_count(&brw->perf_ctx);
+ gen_perf_dump_query_count(brw->perf_ctx);
_mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}
@@ -144,28 +144,14 @@ brw_get_perf_query_info(struct gl_context *ctx,
GLuint *n_active)
{
struct brw_context *brw = brw_context(ctx);
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- const struct gen_perf_query_info *query =
- &perf_ctx->perf->queries[query_index];
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
+ struct gen_perf_config *perf_cfg = gen_perf_config(perf_ctx);
+ const struct gen_perf_query_info *query = &perf_cfg->queries[query_index];
*name = query->name;
*data_size = query->data_size;
*n_counters = query->n_counters;
-
- switch (query->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- *n_active = perf_ctx->n_active_oa_queries;
- break;
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- *n_active = perf_ctx->n_active_pipeline_stats_queries;
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
+ *n_active = gen_perf_active_queries(perf_ctx, query);
}
static GLuint
@@ -213,8 +199,9 @@ brw_get_perf_counter_info(struct gl_context *ctx,
GLuint64 *raw_max)
{
struct brw_context *brw = brw_context(ctx);
+ struct gen_perf_config *perf_cfg = gen_perf_config(brw->perf_ctx);
const struct gen_perf_query_info *query =
- &brw->perf_ctx.perf->queries[query_index];
+ &perf_cfg->queries[query_index];
const struct gen_perf_query_counter *counter =
&query->counters[counter_index];
@@ -260,7 +247,7 @@ brw_begin_perf_query(struct gl_context *ctx,
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
/* We can assume the frontend hides mistaken attempts to Begin a
* query object multiple times before its End. Similarly if an
@@ -291,7 +278,7 @@ brw_end_perf_query(struct gl_context *ctx,
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
DBG("End(%d)\n", o->Id);
gen_perf_end_query(perf_ctx, obj);
@@ -306,7 +293,7 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
assert(!o->Ready);
- gen_perf_wait_query(&brw->perf_ctx, obj, &brw->batch);
+ gen_perf_wait_query(brw->perf_ctx, obj, &brw->batch);
}
static bool
@@ -320,7 +307,7 @@ brw_is_perf_query_ready(struct gl_context *ctx,
if (o->Ready)
return true;
- return gen_perf_is_query_ready(&brw->perf_ctx, obj, &brw->batch);
+ return gen_perf_is_query_ready(brw->perf_ctx, obj, &brw->batch);
}
/**
@@ -349,7 +336,7 @@ brw_get_perf_query_data(struct gl_context *ctx,
*/
assert(o->Ready);
- gen_perf_get_query_data(&brw->perf_ctx, obj,
+ gen_perf_get_query_data(brw->perf_ctx, obj,
data_size, data, bytes_written);
}
@@ -357,7 +344,7 @@ static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
struct brw_context *brw = brw_context(ctx);
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
struct gen_perf_query_object * obj = gen_perf_new_query(perf_ctx, query_index);
if (unlikely(!obj))
return NULL;
@@ -380,7 +367,7 @@ brw_delete_perf_query(struct gl_context *ctx,
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
/* We can assume that the frontend waits for a query to complete
* before ever calling into here, so we don't have to worry about
@@ -482,12 +469,16 @@ brw_init_perf_query_info(struct gl_context *ctx)
struct brw_context *brw = brw_context(ctx);
const struct gen_device_info *devinfo = &brw->screen->devinfo;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- if (perf_ctx->perf)
- return perf_ctx->perf->n_queries;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
+ struct gen_perf_config *perf_cfg = gen_perf_config(perf_ctx);
- perf_ctx->perf = gen_perf_new(brw);
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
+ if (perf_cfg)
+ return perf_cfg->n_queries;
+
+ if (!oa_metrics_kernel_support(brw->screen->driScrnPriv->fd, devinfo))
+ return 0;
+
+ perf_cfg = gen_perf_new(ctx);
perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
@@ -507,11 +498,7 @@ brw_init_perf_query_info(struct gl_context *ctx)
gen_perf_init_context(perf_ctx, perf_cfg, brw, brw->bufmgr, devinfo,
brw->hw_ctx, brw->screen->driScrnPriv->fd);
-
- if (!oa_metrics_kernel_support(perf_ctx->drm_fd, devinfo))
- return 0;
-
- gen_perf_init_metrics(perf_cfg, devinfo, perf_ctx->drm_fd);
+ gen_perf_init_metrics(perf_cfg, devinfo, brw->screen->driScrnPriv->fd);
return perf_cfg->n_queries;
}
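
For reference, the control flow that falls out of the last hunk: as the diff uses it, gen_perf_config() evidently returns NULL for a freshly created context, so brw_init_perf_query_info() can use it as its "already initialized" check, and the oa_metrics_kernel_support() test now runs before any config is allocated. A condensed sketch of that lazy-init shape, using placeholder names (ctx_get_cfg, kernel_ok, cfg_new are stand-ins, not Mesa functions):

#include <stdbool.h>
#include <stdlib.h>

struct cfg { int n_queries; };
struct ctx { struct cfg *cfg; };

/* Stand-ins for gen_perf_config(), oa_metrics_kernel_support() and
 * gen_perf_new(); the real signatures and setup steps differ. */
static struct cfg *ctx_get_cfg(struct ctx *c) { return c->cfg; }
static bool kernel_ok(void)                   { return true; }
static struct cfg *cfg_new(void)              { return calloc(1, sizeof(struct cfg)); }

static int
init_query_info(struct ctx *c)
{
   struct cfg *cfg = ctx_get_cfg(c);

   if (cfg)              /* a previous call already set everything up */
      return cfg->n_queries;

   if (!kernel_ok())     /* bail out before allocating the config */
      return 0;

   cfg = cfg_new();      /* one-time, lazy initialization */
   if (!cfg)
      return 0;
   c->cfg = cfg;
   return cfg->n_queries;
}
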