path: root/src/intel_batchbuffer.h
author     Sreerenj Balachandran <sreerenj.balachandran@intel.com>  2015-10-12 11:37:58 +0300
committer  Xiang, Haihao <haihao.xiang@intel.com>                   2015-10-13 14:38:48 +0800
commit     35bd134b00b640beff65bc41c1245f7fa8687575 (patch)
tree       ce52b5f77c6288b4d0b1d5387fd45971ed1a2d38 /src/intel_batchbuffer.h
parent     f365892c6710be2d4e29759b99afb57903f62b59 (diff)
download   libva-intel-driver-35bd134b00b640beff65bc41c1245f7fa8687575.tar.gz
intel_batchbuffer: Add utility function for supporting 48-bit address relocations in Gen8+
There are Gen8+ instructions which require 48-bit address relocations (e.g. the Surface State Address in the STATE_BASE_ADDRESS instruction). Add a batchbuffer utility function for this relocation, based on Mesa's batch buffer implementation.

Signed-off-by: Sreerenj Balachandran <sreerenj.balachandran@intel.com>
Reviewed-by: Zhao Yakui <yakui.zhao@intel.com>
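For reference, a minimal sketch of how the matching intel_batchbuffer_emit_reloc64() definition in intel_batchbuffer.c could look, modeled on Mesa's 64-bit relocation helper. The batch->buffer/ptr/map field names, dri_bo_emit_reloc() and bo->offset64 are assumptions drawn from the existing 32-bit helpers and libdrm, not the exact code added by this commit (the usual intel_batchbuffer.c includes, i.e. intel_batchbuffer.h and intel_bufmgr.h, are assumed):

void
intel_batchbuffer_emit_reloc64(struct intel_batchbuffer *batch, dri_bo *bo,
                               uint32_t read_domains, uint32_t write_domains,
                               uint32_t delta)
{
    /* Record a relocation entry at the current write offset in the batch
     * (the buffer/ptr/map field names are assumptions). */
    dri_bo_emit_reloc(batch->buffer, read_domains, write_domains,
                      delta, batch->ptr - batch->map, bo);

    /* Emit the presumed 64-bit address so the kernel can skip patching if
     * the buffer does not move: low 32 bits first, then the upper dword
     * that carries the rest of the 48-bit Gen8+ address. */
    uint64_t offset = bo->offset64 + delta;
    intel_batchbuffer_emit_dword(batch, (uint32_t)offset);
    intel_batchbuffer_emit_dword(batch, (uint32_t)(offset >> 32));
}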
Diffstat (limited to 'src/intel_batchbuffer.h')
-rw-r--r--   src/intel_batchbuffer.h   12
1 file changed, 12 insertions, 0 deletions
diff --git a/src/intel_batchbuffer.h b/src/intel_batchbuffer.h
index 41d359db..377e6ae7 100644
--- a/src/intel_batchbuffer.h
+++ b/src/intel_batchbuffer.h
@@ -40,6 +40,9 @@ void intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, unsigned int
void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch, dri_bo *bo,
uint32_t read_domains, uint32_t write_domains,
uint32_t delta);
+void intel_batchbuffer_emit_reloc64(struct intel_batchbuffer *batch, dri_bo *bo,
+ uint32_t read_domains, uint32_t write_domains,
+ uint32_t delta);
void intel_batchbuffer_require_space(struct intel_batchbuffer *batch, unsigned int size);
void intel_batchbuffer_data(struct intel_batchbuffer *batch, void *data, unsigned int size);
void intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);
@@ -78,6 +81,13 @@ void intel_batchbuffer_start_atomic_bcs_override(struct intel_batchbuffer *batch
delta); \
} while (0)
+/* Handle 48-bit address relocations for Gen8+ */
+#define __OUT_RELOC64(batch, bo, read_domains, write_domain, delta) do { \
+ intel_batchbuffer_emit_reloc64(batch, bo, \
+ read_domains, write_domain, \
+ delta); \
+ } while (0)
+
#define __ADVANCE_BATCH(batch) do { \
intel_batchbuffer_advance_batch(batch); \
} while (0)
@@ -98,6 +108,8 @@ void intel_batchbuffer_start_atomic_bcs_override(struct intel_batchbuffer *batch
#define OUT_RELOC(batch, bo, read_domains, write_domain, delta) \
__OUT_RELOC(batch, bo, read_domains, write_domain, delta)
#define OUT_BCS_RELOC(batch, bo, read_domains, write_domain, delta) \
__OUT_RELOC(batch, bo, read_domains, write_domain, delta)
+#define OUT_RELOC64(batch, bo, read_domains, write_domain, delta) \
+ __OUT_RELOC64(batch, bo, read_domains, write_domain, delta)
#define ADVANCE_BATCH(batch) __ADVANCE_BATCH(batch)
#define ADVANCE_BLT_BATCH(batch) __ADVANCE_BATCH(batch)
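A hedged usage sketch of the new macro: a hypothetical helper that emits a command dword followed by a 48-bit buffer address. BEGIN_BATCH, OUT_BATCH and ADVANCE_BATCH are assumed to be the batch macros already provided by this header, and I915_GEM_DOMAIN_INSTRUCTION comes from libdrm; the zero command dword is a placeholder rather than a real GPU instruction encoding:

static void
emit_pointer_example(struct intel_batchbuffer *batch, dri_bo *bo)
{
    BEGIN_BATCH(batch, 3);              /* 1 command dword + 2 address dwords */
    OUT_BATCH(batch, 0);                /* placeholder command dword */
    /* Unlike OUT_RELOC, OUT_RELOC64 consumes two dwords in the batch
     * (low half, then high half), covering the full Gen8+ 48-bit address. */
    OUT_RELOC64(batch, bo,
                I915_GEM_DOMAIN_INSTRUCTION, 0,
                0 /* delta */);
    ADVANCE_BATCH(batch);
}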