author: Allan Sandfeld Jensen <allan.jensen@qt.io> 2021-05-20 09:47:09 +0200
committer: Allan Sandfeld Jensen <allan.jensen@qt.io> 2021-06-07 11:15:42 +0000
commit: 189d4fd8fad9e3c776873be51938cd31a42b6177 (patch)
tree: 6497caeff5e383937996768766ab3bb2081a40b2 /chromium/third_party/minigbm
parent: 8bc75099d364490b22f43a7ce366b366c08f4164 (diff)
download: qtwebengine-chromium-189d4fd8fad9e3c776873be51938cd31a42b6177.tar.gz
BASELINE: Update Chromium to 90.0.4430.221
Change-Id: Iff4d9d18d2fcf1a576f3b1f453010f744a232920
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/minigbm')
-rw-r--r-- chromium/third_party/minigbm/BUILD.gn | 24
-rw-r--r-- chromium/third_party/minigbm/DIR_METADATA | 3
-rw-r--r-- chromium/third_party/minigbm/OWNERS | 1
-rw-r--r-- chromium/third_party/minigbm/src/Android.mk | 7
-rw-r--r-- chromium/third_party/minigbm/src/Makefile | 3
-rw-r--r-- chromium/third_party/minigbm/src/OWNERS | 13
-rw-r--r-- chromium/third_party/minigbm/src/amdgpu.c | 447
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/Makefile | 4
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc | 59
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h | 15
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc | 303
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h | 16
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_handle.h | 34
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc | 27
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h | 4
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_types.h | 9
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc | 48
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc3/.clang-format | 19
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Allocator.cc | 128
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Allocator.h | 29
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3AllocatorService.cc | 30
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Mapper.cc | 490
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Mapper.h | 64
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Utils.cc | 402
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Utils.h | 39
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc3/android.hardware.graphics.allocator@3.0-service.minigbm.rc | 14
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc4/.clang-format | 19
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Allocator.cc | 122
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Allocator.h | 26
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4AllocatorService.cc | 30
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Mapper.cc | 1011
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Mapper.h | 80
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Utils.cc | 671
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Utils.h | 39
-rw-r--r-- chromium/third_party/minigbm/src/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm.rc | 24
-rw-r--r-- chromium/third_party/minigbm/src/dri.c | 5
-rw-r--r-- chromium/third_party/minigbm/src/drv.c | 76
-rw-r--r-- chromium/third_party/minigbm/src/drv.h | 3
-rw-r--r-- chromium/third_party/minigbm/src/drv_priv.h | 17
-rw-r--r-- chromium/third_party/minigbm/src/dumb_driver.c | 53
-rw-r--r-- chromium/third_party/minigbm/src/evdi.c | 29
-rw-r--r-- chromium/third_party/minigbm/src/external/i915_drm.h | 2422
-rw-r--r-- chromium/third_party/minigbm/src/external/virgl_hw.h (renamed from chromium/third_party/minigbm/src/virgl_hw.h) | 14
-rw-r--r-- chromium/third_party/minigbm/src/external/virgl_protocol.h | 632
-rw-r--r-- chromium/third_party/minigbm/src/external/virtgpu_drm.h (renamed from chromium/third_party/minigbm/src/virtgpu_drm.h) | 41
-rw-r--r-- chromium/third_party/minigbm/src/exynos.c | 7
-rw-r--r-- chromium/third_party/minigbm/src/gbm.c | 15
-rw-r--r-- chromium/third_party/minigbm/src/gbm.h | 9
-rw-r--r-- chromium/third_party/minigbm/src/helpers.c | 35
-rw-r--r-- chromium/third_party/minigbm/src/i915.c | 222
-rw-r--r-- chromium/third_party/minigbm/src/marvell.c | 34
-rw-r--r-- chromium/third_party/minigbm/src/mediatek.c | 51
-rw-r--r-- chromium/third_party/minigbm/src/meson.c | 35
-rw-r--r-- chromium/third_party/minigbm/src/msm.c | 109
-rw-r--r-- chromium/third_party/minigbm/src/nouveau.c | 29
-rwxr-xr-x chromium/third_party/minigbm/src/presubmit.sh | 3
-rw-r--r-- chromium/third_party/minigbm/src/radeon.c | 29
-rw-r--r-- chromium/third_party/minigbm/src/rockchip.c | 25
-rw-r--r-- chromium/third_party/minigbm/src/synaptics.c | 33
-rw-r--r-- chromium/third_party/minigbm/src/tegra.c | 14
-rw-r--r-- chromium/third_party/minigbm/src/udl.c | 29
-rw-r--r-- chromium/third_party/minigbm/src/vc4.c | 21
-rw-r--r-- chromium/third_party/minigbm/src/vgem.c | 63
-rw-r--r-- chromium/third_party/minigbm/src/virtio_gpu.c | 710
64 files changed, 8241 insertions, 778 deletions
diff --git a/chromium/third_party/minigbm/BUILD.gn b/chromium/third_party/minigbm/BUILD.gn
index a61140942d0..d3c4c21a095 100644
--- a/chromium/third_party/minigbm/BUILD.gn
+++ b/chromium/third_party/minigbm/BUILD.gn
@@ -3,6 +3,7 @@
# found in the LICENSE file.
import("//build/config/chromecast_build.gni")
+import("//build/config/chromeos/ui_mode.gni")
import("//build/config/linux/pkg_config.gni")
assert(is_linux || is_chromeos)
@@ -11,7 +12,7 @@ declare_args() {
# Controls whether the build should use the version of minigbm library shipped
# with the system. In release builds of desktop Linux and Chrome OS we use the
# system version.
- use_system_minigbm = is_linux && !is_chromecast
+ use_system_minigbm = (is_linux || is_chromeos_lacros) && !is_chromecast
use_amdgpu_minigbm = false
use_exynos_minigbm = false
@@ -40,12 +41,6 @@ if (!use_system_minigbm) {
if (use_intel_minigbm) {
defines += [ "DRV_I915" ]
}
- if (use_marvell_minigbm) {
- defines += [ "DRV_MARVELL" ]
- }
- if (use_mediatek_minigbm) {
- defines += [ "DRV_MEDIATEK" ]
- }
if (use_meson_minigbm) {
defines += [ "DRV_MESON" ]
}
@@ -58,12 +53,6 @@ if (!use_system_minigbm) {
if (use_rockchip_minigbm) {
defines += [ "DRV_ROCKCHIP" ]
}
- if (use_synaptics_minigbm) {
- defines += [ "DRV_SYNAPTICS" ]
- }
- if (use_tegra_minigbm) {
- defines += [ "DRV_TEGRA" ]
- }
if (use_vc4_minigbm) {
defines += [ "DRV_VC4" ]
}
@@ -74,25 +63,18 @@ if (!use_system_minigbm) {
"src/amdgpu.c",
"src/dri.c",
"src/drv.c",
- "src/evdi.c",
+ "src/dumb_driver.c",
"src/exynos.c",
"src/gbm.c",
"src/gbm_helpers.c",
"src/helpers.c",
"src/helpers_array.c",
"src/i915.c",
- "src/marvell.c",
"src/mediatek.c",
- "src/meson.c",
"src/msm.c",
- "src/nouveau.c",
- "src/radeon.c",
"src/rockchip.c",
- "src/synaptics.c",
"src/tegra.c",
- "src/udl.c",
"src/vc4.c",
- "src/vgem.c",
"src/virtio_gpu.c",
]
diff --git a/chromium/third_party/minigbm/DIR_METADATA b/chromium/third_party/minigbm/DIR_METADATA
new file mode 100644
index 00000000000..2b73a7bd4ca
--- /dev/null
+++ b/chromium/third_party/minigbm/DIR_METADATA
@@ -0,0 +1,3 @@
+monorail: {
+ component: "Internals>GPU"
+}
diff --git a/chromium/third_party/minigbm/OWNERS b/chromium/third_party/minigbm/OWNERS
index 437b03dcc01..6223bd56e36 100644
--- a/chromium/third_party/minigbm/OWNERS
+++ b/chromium/third_party/minigbm/OWNERS
@@ -3,4 +3,3 @@ spang@chromium.org
gurchetansingh@chromium.org
servolk@chromium.org
dnicoara@chromium.org
-# COMPONENT: Internals>GPU
diff --git a/chromium/third_party/minigbm/src/Android.mk b/chromium/third_party/minigbm/src/Android.mk
index 3eab7aeb8c0..18c1bba30e5 100644
--- a/chromium/third_party/minigbm/src/Android.mk
+++ b/chromium/third_party/minigbm/src/Android.mk
@@ -11,23 +11,18 @@ MINIGBM_SRC := \
amdgpu.c \
dri.c \
drv.c \
- evdi.c \
+ dumb_driver.c \
exynos.c \
helpers_array.c \
helpers.c \
i915.c \
- marvell.c \
mediatek.c \
meson.c \
msm.c \
- nouveau.c \
radeon.c \
rockchip.c \
- synaptics.c \
tegra.c \
- udl.c \
vc4.c \
- vgem.c \
virtio_gpu.c
MINIGBM_CPPFLAGS := -std=c++14
diff --git a/chromium/third_party/minigbm/src/Makefile b/chromium/third_party/minigbm/src/Makefile
index 35f92f21102..82380265fad 100644
--- a/chromium/third_party/minigbm/src/Makefile
+++ b/chromium/third_party/minigbm/src/Makefile
@@ -25,6 +25,9 @@ endif
ifdef DRV_MESON
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_meson)
endif
+ifdef DRV_MSM
+ CFLAGS += -ldl
+endif
ifdef DRV_RADEON
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_radeon)
endif
diff --git a/chromium/third_party/minigbm/src/OWNERS b/chromium/third_party/minigbm/src/OWNERS
index f4aa651e992..d9d5bf33fa5 100644
--- a/chromium/third_party/minigbm/src/OWNERS
+++ b/chromium/third_party/minigbm/src/OWNERS
@@ -1,10 +1,11 @@
-marcheu@chromium.org
-gurchetansingh@chromium.org
-hoegsberg@chromium.org
-tfiga@chromium.org
-ddavenport@chromium.org
dbehr@chromium.org
dcastagna@chromium.org
+ddavenport@chromium.org
+gurchetansingh@chromium.org
+hoegsberg@chromium.org
+ihf@chromium.org
lepton@chromium.org
-tutankhamen@chromium.org
+marcheu@chromium.org
stevensd@chromium.org
+tfiga@chromium.org
+tutankhamen@chromium.org
diff --git a/chromium/third_party/minigbm/src/amdgpu.c b/chromium/third_party/minigbm/src/amdgpu.c
index 795d1379060..93681cb13f4 100644
--- a/chromium/third_party/minigbm/src/amdgpu.c
+++ b/chromium/third_party/minigbm/src/amdgpu.c
@@ -26,19 +26,283 @@
/* DRI backend decides tiling in this case. */
#define TILE_TYPE_DRI 1
+/* Height alignment for Encoder/Decoder buffers */
+#define CHROME_HEIGHT_ALIGN 16
+
struct amdgpu_priv {
struct dri_driver dri;
int drm_version;
+
+ /* sdma */
+ struct drm_amdgpu_info_device dev_info;
+ uint32_t sdma_ctx;
+ uint32_t sdma_cmdbuf_bo;
+ uint64_t sdma_cmdbuf_addr;
+ uint64_t sdma_cmdbuf_size;
+ uint32_t *sdma_cmdbuf_map;
};
-const static uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB8888 };
+struct amdgpu_linear_vma_priv {
+ uint32_t handle;
+ uint32_t map_flags;
+};
+
+const static uint32_t render_target_formats[] = {
+ DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_ARGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB2101010,
+};
const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
DRM_FORMAT_NV21, DRM_FORMAT_NV12,
DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420 };
+static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev_info)
+{
+ struct drm_amdgpu_info info_args = { 0 };
+
+ info_args.return_pointer = (uintptr_t)dev_info;
+ info_args.return_size = sizeof(*dev_info);
+ info_args.query = AMDGPU_INFO_DEV_INFO;
+
+ return drmCommandWrite(fd, DRM_AMDGPU_INFO, &info_args, sizeof(info_args));
+}
+
+static int sdma_init(struct amdgpu_priv *priv, int fd)
+{
+ union drm_amdgpu_ctx ctx_args = { { 0 } };
+ union drm_amdgpu_gem_create gem_create = { { 0 } };
+ struct drm_amdgpu_gem_va va_args = { 0 };
+ union drm_amdgpu_gem_mmap gem_map = { { 0 } };
+ struct drm_gem_close gem_close = { 0 };
+ int ret;
+
+ /* Ensure we can make a submission without BO lists. */
+ if (priv->drm_version < 27)
+ return 0;
+
+ /* Anything outside this range needs adjustments to the SDMA copy commands */
+ if (priv->dev_info.family < AMDGPU_FAMILY_CI || priv->dev_info.family > AMDGPU_FAMILY_NV)
+ return 0;
+
+ ctx_args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
+
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+ if (ret < 0)
+ return ret;
+
+ priv->sdma_ctx = ctx_args.out.alloc.ctx_id;
+
+ priv->sdma_cmdbuf_size = ALIGN(4096, priv->dev_info.virtual_address_alignment);
+ gem_create.in.bo_size = priv->sdma_cmdbuf_size;
+ gem_create.in.alignment = 4096;
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &gem_create, sizeof(gem_create));
+ if (ret < 0)
+ goto fail_ctx;
+
+ priv->sdma_cmdbuf_bo = gem_create.out.handle;
+
+ priv->sdma_cmdbuf_addr =
+ ALIGN(priv->dev_info.virtual_address_offset, priv->dev_info.virtual_address_alignment);
+
+ /* Map the buffer into the GPU address space so we can use it from the GPU */
+ va_args.handle = priv->sdma_cmdbuf_bo;
+ va_args.operation = AMDGPU_VA_OP_MAP;
+ va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_EXECUTABLE;
+ va_args.va_address = priv->sdma_cmdbuf_addr;
+ va_args.offset_in_bo = 0;
+ va_args.map_size = priv->sdma_cmdbuf_size;
+
+ ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+ if (ret)
+ goto fail_bo;
+
+ gem_map.in.handle = priv->sdma_cmdbuf_bo;
+ ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
+ if (ret)
+ goto fail_va;
+
+ priv->sdma_cmdbuf_map = mmap(0, priv->sdma_cmdbuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, gem_map.out.addr_ptr);
+ if (priv->sdma_cmdbuf_map == MAP_FAILED) {
+ priv->sdma_cmdbuf_map = NULL;
+ ret = -ENOMEM;
+ goto fail_va;
+ }
+
+ return 0;
+fail_va:
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = 0;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+fail_bo:
+ gem_close.handle = priv->sdma_cmdbuf_bo;
+ drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+fail_ctx:
+ memset(&ctx_args, 0, sizeof(ctx_args));
+ ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+ ctx_args.in.ctx_id = priv->sdma_ctx;
+ drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+ return ret;
+}
+
+static void sdma_finish(struct amdgpu_priv *priv, int fd)
+{
+ union drm_amdgpu_ctx ctx_args = { { 0 } };
+ struct drm_amdgpu_gem_va va_args = { 0 };
+ struct drm_gem_close gem_close = { 0 };
+
+ if (!priv->sdma_cmdbuf_map)
+ return;
+
+ va_args.handle = priv->sdma_cmdbuf_bo;
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = 0;
+ va_args.va_address = priv->sdma_cmdbuf_addr;
+ va_args.offset_in_bo = 0;
+ va_args.map_size = priv->sdma_cmdbuf_size;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+ gem_close.handle = priv->sdma_cmdbuf_bo;
+ drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+
+ ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+ ctx_args.in.ctx_id = priv->sdma_ctx;
+ drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+}
+
+static int sdma_copy(struct amdgpu_priv *priv, int fd, uint32_t src_handle, uint32_t dst_handle,
+ uint64_t size)
+{
+ const uint64_t max_size_per_cmd = 0x3fff00;
+ const uint32_t cmd_size = 7 * sizeof(uint32_t); /* 7 dwords, see loop below. */
+ const uint64_t max_commands = priv->sdma_cmdbuf_size / cmd_size;
+ uint64_t src_addr = priv->sdma_cmdbuf_addr + priv->sdma_cmdbuf_size;
+ uint64_t dst_addr = src_addr + size;
+ struct drm_amdgpu_gem_va va_args = { 0 };
+ unsigned cmd = 0;
+ uint64_t remaining_size = size;
+ uint64_t cur_src_addr = src_addr;
+ uint64_t cur_dst_addr = dst_addr;
+ struct drm_amdgpu_cs_chunk_ib ib = { 0 };
+ struct drm_amdgpu_cs_chunk chunks[2] = { { 0 } };
+ uint64_t chunk_ptrs[2];
+ union drm_amdgpu_cs cs = { { 0 } };
+ struct drm_amdgpu_bo_list_in bo_list = { 0 };
+ struct drm_amdgpu_bo_list_entry bo_list_entries[3] = { { 0 } };
+ union drm_amdgpu_wait_cs wait_cs = { { 0 } };
+ int ret = 0;
+
+ if (size > UINT64_MAX - max_size_per_cmd ||
+ DIV_ROUND_UP(size, max_size_per_cmd) > max_commands)
+ return -ENOMEM;
+
+ /* Map both buffers into the GPU address space so we can access them from the GPU. */
+ va_args.handle = src_handle;
+ va_args.operation = AMDGPU_VA_OP_MAP;
+ va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = src_addr;
+ va_args.map_size = size;
+
+ ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+ if (ret)
+ return ret;
+
+ va_args.handle = dst_handle;
+ va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = dst_addr;
+
+ ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+ if (ret)
+ goto unmap_src;
+
+ while (remaining_size) {
+ uint64_t cur_size = remaining_size;
+ if (cur_size > max_size_per_cmd)
+ cur_size = max_size_per_cmd;
+
+ priv->sdma_cmdbuf_map[cmd++] = 0x01; /* linear copy */
+ priv->sdma_cmdbuf_map[cmd++] =
+ priv->dev_info.family >= AMDGPU_FAMILY_AI ? (cur_size - 1) : cur_size;
+ priv->sdma_cmdbuf_map[cmd++] = 0;
+ priv->sdma_cmdbuf_map[cmd++] = cur_src_addr;
+ priv->sdma_cmdbuf_map[cmd++] = cur_src_addr >> 32;
+ priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr;
+ priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr >> 32;
+
+ remaining_size -= cur_size;
+ cur_src_addr += cur_size;
+ cur_dst_addr += cur_size;
+ }
+
+ ib.va_start = priv->sdma_cmdbuf_addr;
+ ib.ib_bytes = cmd * 4;
+ ib.ip_type = AMDGPU_HW_IP_DMA;
+
+ chunks[1].chunk_id = AMDGPU_CHUNK_ID_IB;
+ chunks[1].length_dw = sizeof(ib) / 4;
+ chunks[1].chunk_data = (uintptr_t)&ib;
+
+ bo_list_entries[0].bo_handle = priv->sdma_cmdbuf_bo;
+ bo_list_entries[0].bo_priority = 8; /* Middle of range, like RADV. */
+ bo_list_entries[1].bo_handle = src_handle;
+ bo_list_entries[1].bo_priority = 8;
+ bo_list_entries[2].bo_handle = dst_handle;
+ bo_list_entries[2].bo_priority = 8;
+
+ bo_list.bo_number = 3;
+ bo_list.bo_info_size = sizeof(bo_list_entries[0]);
+ bo_list.bo_info_ptr = (uintptr_t)bo_list_entries;
+
+ chunks[0].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
+ chunks[0].length_dw = sizeof(bo_list) / 4;
+ chunks[0].chunk_data = (uintptr_t)&bo_list;
+
+ chunk_ptrs[0] = (uintptr_t)&chunks[0];
+ chunk_ptrs[1] = (uintptr_t)&chunks[1];
+
+ cs.in.ctx_id = priv->sdma_ctx;
+ cs.in.num_chunks = 2;
+ cs.in.chunks = (uintptr_t)chunk_ptrs;
+
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
+ if (ret) {
+ drv_log("SDMA copy command buffer submission failed %d\n", ret);
+ goto unmap_dst;
+ }
+
+ wait_cs.in.handle = cs.out.handle;
+ wait_cs.in.ip_type = AMDGPU_HW_IP_DMA;
+ wait_cs.in.ctx_id = priv->sdma_ctx;
+ wait_cs.in.timeout = INT64_MAX;
+
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait_cs, sizeof(wait_cs));
+ if (ret) {
+ drv_log("Could not wait for CS to finish\n");
+ } else if (wait_cs.out.status) {
+ drv_log("Infinite wait timed out, likely GPU hang.\n");
+ ret = -ENODEV;
+ }
+
+unmap_dst:
+ va_args.handle = dst_handle;
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = dst_addr;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+unmap_src:
+ va_args.handle = src_handle;
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = src_addr;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+ return ret;
+}
+
static int amdgpu_init(struct driver *drv)
{
struct amdgpu_priv *priv;
@@ -61,12 +325,23 @@ static int amdgpu_init(struct driver *drv)
drv->priv = priv;
+ if (query_dev_info(drv_get_fd(drv), &priv->dev_info)) {
+ free(priv);
+ drv->priv = NULL;
+ return -ENODEV;
+ }
if (dri_init(drv, DRI_PATH, "radeonsi")) {
free(priv);
drv->priv = NULL;
return -ENODEV;
}
+ if (sdma_init(priv, drv_get_fd(drv))) {
+ drv_log("SDMA init failed\n");
+
+ /* Continue, as we can still successfully map things without SDMA. */
+ }
+
metadata.tiling = TILE_TYPE_LINEAR;
metadata.priority = 1;
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
@@ -77,12 +352,10 @@ static int amdgpu_init(struct driver *drv)
drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
&metadata, BO_USE_TEXTURE_MASK);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_ENCODER);
+ /* NV12 format for camera, display, decoding and encoding. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
@@ -93,19 +366,20 @@ static int amdgpu_init(struct driver *drv)
drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
- /* YUV formats for camera and display. */
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
- BO_USE_HW_VIDEO_DECODER);
+ drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
- * from camera.
+ * from camera and input/output from hardware decoder/encoder.
*/
drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
/*
* The following formats will be allocated by the DRI backend and may be potentially tiled.
@@ -128,11 +402,17 @@ static int amdgpu_init(struct driver *drv)
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
+
+ drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
return 0;
}
static void amdgpu_close(struct driver *drv)
{
+ sdma_finish(drv->priv, drv_get_fd(drv));
dri_close(drv);
free(drv->priv);
drv->priv = NULL;
@@ -142,16 +422,39 @@ static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t heigh
uint64_t use_flags)
{
int ret;
+ size_t num_planes;
uint32_t plane, stride;
- union drm_amdgpu_gem_create gem_create;
+ union drm_amdgpu_gem_create gem_create = { { 0 } };
+ struct amdgpu_priv *priv = bo->drv->priv;
stride = drv_stride_from_format(format, width, 0);
- stride = ALIGN(stride, 256);
+ num_planes = drv_num_planes_from_format(format);
+
+ /*
+ * For multiplane formats, align the stride to 512 to ensure that subsample strides are 256
+ * aligned. This uses more memory than necessary since the first plane only needs to be
+ * 256 aligned, but it's acceptable for a short-term fix. It's probably safe for other gpu
+ * families, but let's restrict it to Raven for now (b/171013552).
+ */
+ if (priv->dev_info.family == AMDGPU_FAMILY_RV && num_planes > 1)
+ stride = ALIGN(stride, 512);
+ else
+ stride = ALIGN(stride, 256);
+
+ /*
+ * Currently, the allocator used by Chrome aligns the height for encoder/
+ * decoder buffers, while the allocator used by Android (gralloc/minigbm)
+ * doesn't provide any alignment.
+ *
+ * See b/153130069
+ */
+ if (use_flags & (BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER))
+ height = ALIGN(height, CHROME_HEIGHT_ALIGN);
drv_bo_from_format(bo, stride, height, format);
- memset(&gem_create, 0, sizeof(gem_create));
- gem_create.in.bo_size = bo->meta.total_size;
+ gem_create.in.bo_size =
+ ALIGN(bo->meta.total_size, priv->dev_info.virtual_address_alignment);
gem_create.in.alignment = 256;
gem_create.in.domain_flags = 0;
@@ -159,7 +462,11 @@ static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t heigh
gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
- if (!(use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SCANOUT)))
+
+ /* Scanout in GTT requires USWC, otherwise try to use cacheable memory
+ * for buffers that are read often, because uncacheable reads can be
+ * very slow. USWC should be faster on the GPU though. */
+ if ((use_flags & BO_USE_SCANOUT) || !(use_flags & BO_USE_SW_READ_OFTEN))
gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
/* Allocate the buffer with the preferred heap. */
@@ -257,44 +564,122 @@ static int amdgpu_destroy_bo(struct bo *bo)
static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
+ void *addr = MAP_FAILED;
int ret;
- union drm_amdgpu_gem_mmap gem_map;
+ union drm_amdgpu_gem_mmap gem_map = { { 0 } };
+ struct drm_amdgpu_gem_create_in bo_info = { 0 };
+ struct drm_amdgpu_gem_op gem_op = { 0 };
+ uint32_t handle = bo->handles[plane].u32;
+ struct amdgpu_linear_vma_priv *priv = NULL;
+ struct amdgpu_priv *drv_priv;
if (bo->priv)
return dri_bo_map(bo, vma, plane, map_flags);
- memset(&gem_map, 0, sizeof(gem_map));
- gem_map.in.handle = bo->handles[plane].u32;
+ drv_priv = bo->drv->priv;
+ gem_op.handle = handle;
+ gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
+ gem_op.value = (uintptr_t)&bo_info;
+
+ ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
+ if (ret)
+ return MAP_FAILED;
+
+ vma->length = bo_info.bo_size;
+
+ if (((bo_info.domains & AMDGPU_GEM_DOMAIN_VRAM) ||
+ (bo_info.domain_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)) &&
+ drv_priv->sdma_cmdbuf_map) {
+ union drm_amdgpu_gem_create gem_create = { { 0 } };
+
+ priv = calloc(1, sizeof(struct amdgpu_linear_vma_priv));
+ if (!priv)
+ return MAP_FAILED;
+ gem_create.in.bo_size = bo_info.bo_size;
+ gem_create.in.alignment = 4096;
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_CREATE, &gem_create,
+ sizeof(gem_create));
+ if (ret < 0) {
+ drv_log("GEM create failed\n");
+ free(priv);
+ return MAP_FAILED;
+ }
+
+ priv->map_flags = map_flags;
+ handle = priv->handle = gem_create.out.handle;
+
+ ret = sdma_copy(bo->drv->priv, bo->drv->fd, bo->handles[0].u32, priv->handle,
+ bo_info.bo_size);
+ if (ret) {
+ drv_log("SDMA copy for read failed\n");
+ goto fail;
+ }
+ }
+
+ gem_map.in.handle = handle;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
if (ret) {
drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
- return MAP_FAILED;
+ goto fail;
}
- vma->length = bo->meta.total_size;
-
- return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.out.addr_ptr);
+ if (addr == MAP_FAILED)
+ goto fail;
+
+ vma->priv = priv;
+ return addr;
+
+fail:
+ if (priv) {
+ struct drm_gem_close gem_close = { 0 };
+ gem_close.handle = priv->handle;
+ drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ free(priv);
+ }
+ return MAP_FAILED;
}
static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
{
if (bo->priv)
return dri_bo_unmap(bo, vma);
- else
- return munmap(vma->addr, vma->length);
+ else {
+ int r = munmap(vma->addr, vma->length);
+ if (r)
+ return r;
+
+ if (vma->priv) {
+ struct amdgpu_linear_vma_priv *priv = vma->priv;
+ struct drm_gem_close gem_close = { 0 };
+
+ if (BO_MAP_WRITE & priv->map_flags) {
+ r = sdma_copy(bo->drv->priv, bo->drv->fd, priv->handle,
+ bo->handles[0].u32, vma->length);
+ if (r)
+ return r;
+ }
+
+ gem_close.handle = priv->handle;
+ r = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ }
+
+ return 0;
+ }
}
static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
int ret;
- union drm_amdgpu_gem_wait_idle wait_idle;
+ union drm_amdgpu_gem_wait_idle wait_idle = { { 0 } };
if (bo->priv)
return 0;
- memset(&wait_idle, 0, sizeof(wait_idle));
wait_idle.in.handle = bo->handles[0].u32;
wait_idle.in.timeout = AMDGPU_TIMEOUT_INFINITE;
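
The sdma_copy() routine added above emits one SDMA "linear copy" packet per chunk of at most 0x3fff00 bytes, seven dwords each. The following standalone sketch (a hypothetical helper, not part of the patch) mirrors the dword layout from the loop in sdma_copy(); on AMDGPU_FAMILY_AI and newer the byte-count field holds size minus one.

#include <cstdint>
#include <vector>

// Sketch of the 7-dword SDMA linear-copy packet built by sdma_copy() above.
// Opcode 0x01 selects a linear copy; src/dst are GPU virtual addresses.
std::vector<uint32_t> encode_linear_copy(uint64_t src, uint64_t dst,
                                         uint32_t bytes, bool family_ge_ai)
{
	std::vector<uint32_t> cmd;
	cmd.push_back(0x01);                             /* linear copy */
	cmd.push_back(family_ge_ai ? bytes - 1 : bytes); /* AI+ stores size - 1 */
	cmd.push_back(0);
	cmd.push_back(static_cast<uint32_t>(src));       /* src address, low dword */
	cmd.push_back(static_cast<uint32_t>(src >> 32)); /* src address, high dword */
	cmd.push_back(static_cast<uint32_t>(dst));       /* dst address, low dword */
	cmd.push_back(static_cast<uint32_t>(dst >> 32)); /* dst address, high dword */
	return cmd; /* 7 dwords == cmd_size in sdma_copy() */
}
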
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/Makefile b/chromium/third_party/minigbm/src/cros_gralloc/Makefile
index 17e884fb210..c95ad2cd2ea 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/Makefile
+++ b/chromium/third_party/minigbm/src/cros_gralloc/Makefile
@@ -16,9 +16,9 @@ VPATH = $(dir $(SOURCES))
LIBDRM_CFLAGS := $(shell $(PKG_CONFIG) --cflags libdrm)
LIBDRM_LIBS := $(shell $(PKG_CONFIG) --libs libdrm)
-CPPFLAGS += -Wall -fPIC -Werror -flto $(LIBDRM_CFLAGS)
+CPPFLAGS += -Wall -fPIC -Werror -flto $(LIBDRM_CFLAGS) -D_GNU_SOURCE=1
CXXFLAGS += -std=c++14
-CFLAGS += -std=c99
+CFLAGS += -std=c99 -D_GNU_SOURCE=1
LIBS += -shared -lcutils -lhardware -lsync $(LIBDRM_LIBS)
OBJS = $(foreach source, $(SOURCES), $(addsuffix .o, $(basename $(source))))
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc
index 1066edccfda..2982505ad8b 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc
@@ -10,8 +10,11 @@
#include <sys/mman.h>
cros_gralloc_buffer::cros_gralloc_buffer(uint32_t id, struct bo *acquire_bo,
- struct cros_gralloc_handle *acquire_handle)
- : id_(id), bo_(acquire_bo), hnd_(acquire_handle), refcount_(1), lockcount_(0)
+ struct cros_gralloc_handle *acquire_handle,
+ int32_t reserved_region_fd, uint64_t reserved_region_size)
+ : id_(id), bo_(acquire_bo), hnd_(acquire_handle), refcount_(1), lockcount_(0),
+ reserved_region_fd_(reserved_region_fd), reserved_region_size_(reserved_region_size),
+ reserved_region_addr_(nullptr)
{
assert(bo_);
num_planes_ = drv_bo_get_num_planes(bo_);
@@ -26,6 +29,9 @@ cros_gralloc_buffer::~cros_gralloc_buffer()
native_handle_close(&hnd_->base);
delete hnd_;
}
+ if (reserved_region_addr_) {
+ munmap(reserved_region_addr_, reserved_region_size_);
+ }
}
uint32_t cros_gralloc_buffer::get_id() const
@@ -114,3 +120,52 @@ int32_t cros_gralloc_buffer::resource_info(uint32_t strides[DRV_MAX_PLANES],
{
return drv_resource_info(bo_, strides, offsets);
}
+
+int32_t cros_gralloc_buffer::invalidate()
+{
+ if (lockcount_ <= 0) {
+ drv_log("Buffer was not locked.\n");
+ return -EINVAL;
+ }
+
+ if (lock_data_[0]) {
+ return drv_bo_invalidate(bo_, lock_data_[0]);
+ }
+
+ return 0;
+}
+
+int32_t cros_gralloc_buffer::flush()
+{
+ if (lockcount_ <= 0) {
+ drv_log("Buffer was not locked.\n");
+ return -EINVAL;
+ }
+
+ if (lock_data_[0]) {
+ return drv_bo_flush(bo_, lock_data_[0]);
+ }
+
+ return 0;
+}
+
+int32_t cros_gralloc_buffer::get_reserved_region(void **addr, uint64_t *size)
+{
+ if (reserved_region_fd_ <= 0) {
+ drv_log("Buffer does not have reserved region.\n");
+ return -EINVAL;
+ }
+
+ if (!reserved_region_addr_) {
+ reserved_region_addr_ = mmap(nullptr, reserved_region_size_, PROT_WRITE | PROT_READ,
+ MAP_SHARED, reserved_region_fd_, 0);
+ if (reserved_region_addr_ == MAP_FAILED) {
+ drv_log("Failed to mmap reserved region: %s.\n", strerror(errno));
+ return -errno;
+ }
+ }
+
+ *addr = reserved_region_addr_;
+ *size = reserved_region_size_;
+ return 0;
+}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h
index ebd72ec2cac..cb6cb4b8da4 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h
@@ -14,7 +14,8 @@ class cros_gralloc_buffer
{
public:
cros_gralloc_buffer(uint32_t id, struct bo *acquire_bo,
- struct cros_gralloc_handle *acquire_handle);
+ struct cros_gralloc_handle *acquire_handle, int32_t reserved_region_fd,
+ uint64_t reserved_region_size);
~cros_gralloc_buffer();
uint32_t get_id() const;
@@ -28,12 +29,19 @@ class cros_gralloc_buffer
int32_t unlock();
int32_t resource_info(uint32_t strides[DRV_MAX_PLANES], uint32_t offsets[DRV_MAX_PLANES]);
+ int32_t invalidate();
+ int32_t flush();
+
+ int32_t get_reserved_region(void **reserved_region_addr, uint64_t *reserved_region_size);
+
private:
cros_gralloc_buffer(cros_gralloc_buffer const &);
cros_gralloc_buffer operator=(cros_gralloc_buffer const &);
uint32_t id_;
struct bo *bo_;
+
+ /* Note: this will be nullptr for imported/retained buffers. */
struct cros_gralloc_handle *hnd_;
int32_t refcount_;
@@ -41,6 +49,11 @@ class cros_gralloc_buffer
uint32_t num_planes_;
struct mapping *lock_data_[DRV_MAX_PLANES];
+
+ /* Optional additional shared memory region attached to some gralloc buffers. */
+ int32_t reserved_region_fd_;
+ uint64_t reserved_region_size_;
+ void *reserved_region_addr_;
};
#endif
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc
index 62b43d4c4a3..d9e6cf573f3 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc
@@ -5,12 +5,46 @@
*/
#include "cros_gralloc_driver.h"
-#include "../util.h"
#include <cstdlib>
#include <fcntl.h>
+#include <sys/mman.h>
+#include <syscall.h>
#include <xf86drm.h>
+#include "../drv_priv.h"
+#include "../helpers.h"
+#include "../util.h"
+
+// Constants taken from pipe_loader_drm.c in Mesa
+
+#define DRM_NUM_NODES 63
+
+// DRM Render nodes start at 128
+#define DRM_RENDER_NODE_START 128
+
+// DRM Card nodes start at 0
+#define DRM_CARD_NODE_START 0
+
+int memfd_create_wrapper(const char *name, unsigned int flags)
+{
+ int fd;
+
+#if defined(HAVE_MEMFD_CREATE)
+ fd = memfd_create(name, flags);
+#elif defined(__NR_memfd_create)
+ fd = syscall(__NR_memfd_create, name, flags);
+#else
+ drv_log("Failed to create memfd '%s': memfd_create not available.", name);
+ return -1;
+#endif
+
+ if (fd == -1) {
+ drv_log("Failed to create memfd '%s': %s.\n", name, strerror(errno));
+ }
+ return fd;
+}
+
cros_gralloc_driver::cros_gralloc_driver() : drv_(nullptr)
{
}
@@ -28,54 +62,57 @@ cros_gralloc_driver::~cros_gralloc_driver()
}
}
+static struct driver *init_try_node(int idx, char const *str)
+{
+ int fd;
+ char *node;
+ struct driver *drv;
+
+ if (asprintf(&node, str, DRM_DIR_NAME, idx) < 0)
+ return NULL;
+
+ fd = open(node, O_RDWR, 0);
+ free(node);
+
+ if (fd < 0)
+ return NULL;
+
+ drv = drv_create(fd);
+ if (!drv)
+ close(fd);
+
+ return drv;
+}
+
int32_t cros_gralloc_driver::init()
{
/*
- * Create a driver from rendernode while filtering out
- * the specified undesired driver.
+ * Create a driver from render nodes first, then try card
+ * nodes.
*
* TODO(gsingh): Enable render nodes on udl/evdi.
*/
- int fd;
- drmVersionPtr version;
- char const *str = "%s/renderD%d";
- const char *undesired[2] = { "vgem", nullptr };
- uint32_t num_nodes = 63;
- uint32_t min_node = 128;
- uint32_t max_node = (min_node + num_nodes);
-
- for (uint32_t i = 0; i < ARRAY_SIZE(undesired); i++) {
- for (uint32_t j = min_node; j < max_node; j++) {
- char *node;
- if (asprintf(&node, str, DRM_DIR_NAME, j) < 0)
- continue;
-
- fd = open(node, O_RDWR, 0);
- free(node);
-
- if (fd < 0)
- continue;
-
- version = drmGetVersion(fd);
- if (!version) {
- close(fd);
- continue;
- }
-
- if (undesired[i] && !strcmp(version->name, undesired[i])) {
- close(fd);
- drmFreeVersion(version);
- continue;
- }
-
- drmFreeVersion(version);
- drv_ = drv_create(fd);
- if (drv_)
- return 0;
-
- close(fd);
- }
+ char const *render_nodes_fmt = "%s/renderD%d";
+ char const *card_nodes_fmt = "%s/card%d";
+ uint32_t num_nodes = DRM_NUM_NODES;
+ uint32_t min_render_node = DRM_RENDER_NODE_START;
+ uint32_t max_render_node = (min_render_node + num_nodes);
+ uint32_t min_card_node = DRM_CARD_NODE_START;
+ uint32_t max_card_node = (min_card_node + num_nodes);
+
+ // Try render nodes...
+ for (uint32_t i = min_render_node; i < max_render_node; i++) {
+ drv_ = init_try_node(i, render_nodes_fmt);
+ if (drv_)
+ return 0;
+ }
+
+ // Try card nodes... for vkms mostly.
+ for (uint32_t i = min_card_node; i < max_card_node; i++) {
+ drv_ = init_try_node(i, card_nodes_fmt);
+ if (drv_)
+ return 0;
}
return -ENODEV;
@@ -90,15 +127,36 @@ bool cros_gralloc_driver::is_supported(const struct cros_gralloc_buffer_descript
return (combo != nullptr);
}
+int32_t create_reserved_region(const std::string &buffer_name, uint64_t reserved_region_size)
+{
+ std::string reserved_region_name = buffer_name + " reserved region";
+
+ int32_t reserved_region_fd = memfd_create_wrapper(reserved_region_name.c_str(), FD_CLOEXEC);
+ if (reserved_region_fd == -1) {
+ return -1;
+ }
+
+ if (ftruncate(reserved_region_fd, reserved_region_size)) {
+ drv_log("Failed to set reserved region size: %s.\n", strerror(errno));
+ return -errno;
+ }
+
+ return reserved_region_fd;
+}
+
int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descriptor *descriptor,
buffer_handle_t *out_handle)
{
uint32_t id;
- uint64_t mod;
size_t num_planes;
+ size_t num_fds;
+ size_t num_ints;
+ size_t num_bytes;
uint32_t resolved_format;
uint32_t bytes_per_pixel;
uint64_t use_flags;
+ int32_t reserved_region_fd;
+ char *name;
struct bo *bo;
struct cros_gralloc_handle *hnd;
@@ -140,41 +198,73 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto
return -EINVAL;
}
- hnd = new cros_gralloc_handle();
num_planes = drv_bo_get_num_planes(bo);
+ num_fds = num_planes;
+
+ if (descriptor->reserved_region_size > 0) {
+ reserved_region_fd =
+ create_reserved_region(descriptor->name, descriptor->reserved_region_size);
+ if (reserved_region_fd < 0) {
+ drv_bo_destroy(bo);
+ return reserved_region_fd;
+ }
+ num_fds += 1;
+ } else {
+ reserved_region_fd = -1;
+ }
+ num_bytes = sizeof(struct cros_gralloc_handle);
+ num_bytes += (descriptor->name.size() + 1);
+ /*
+ * Ensure that the total number of bytes is a multiple of sizeof(int) as
+ * native_handle_clone() copies data based on hnd->base.numInts.
+ */
+ num_bytes = ALIGN(num_bytes, sizeof(int));
+ num_ints = num_bytes - sizeof(native_handle_t) - num_fds;
+ /*
+ * Malloc is used as handles are ultimately destroyed via free in
+ * native_handle_delete().
+ */
+ hnd = static_cast<struct cros_gralloc_handle *>(malloc(num_bytes));
hnd->base.version = sizeof(hnd->base);
- hnd->base.numFds = num_planes;
- hnd->base.numInts = handle_data_size - num_planes;
-
+ hnd->base.numFds = num_fds;
+ hnd->base.numInts = num_ints;
+ hnd->num_planes = num_planes;
for (size_t plane = 0; plane < num_planes; plane++) {
hnd->fds[plane] = drv_bo_get_plane_fd(bo, plane);
hnd->strides[plane] = drv_bo_get_plane_stride(bo, plane);
hnd->offsets[plane] = drv_bo_get_plane_offset(bo, plane);
-
- mod = drv_bo_get_plane_format_modifier(bo, plane);
- hnd->format_modifiers[2 * plane] = static_cast<uint32_t>(mod >> 32);
- hnd->format_modifiers[2 * plane + 1] = static_cast<uint32_t>(mod);
+ hnd->sizes[plane] = drv_bo_get_plane_size(bo, plane);
}
-
+ hnd->fds[hnd->num_planes] = reserved_region_fd;
+ hnd->reserved_region_size = descriptor->reserved_region_size;
+ static std::atomic<uint32_t> next_buffer_id{ 1 };
+ hnd->id = next_buffer_id++;
hnd->width = drv_bo_get_width(bo);
hnd->height = drv_bo_get_height(bo);
hnd->format = drv_bo_get_format(bo);
- hnd->use_flags[0] = static_cast<uint32_t>(descriptor->use_flags >> 32);
- hnd->use_flags[1] = static_cast<uint32_t>(descriptor->use_flags);
+ hnd->tiling = bo->meta.tiling;
+ hnd->format_modifier = drv_bo_get_plane_format_modifier(bo, 0);
+ hnd->use_flags = descriptor->use_flags;
bytes_per_pixel = drv_bytes_per_pixel_from_format(hnd->format, 0);
hnd->pixel_stride = DIV_ROUND_UP(hnd->strides[0], bytes_per_pixel);
hnd->magic = cros_gralloc_magic;
hnd->droid_format = descriptor->droid_format;
- hnd->usage = descriptor->producer_usage;
+ hnd->usage = descriptor->droid_usage;
+ hnd->total_size = descriptor->reserved_region_size + bo->meta.total_size;
+ hnd->name_offset = handle_data_size;
+
+ name = (char *)(&hnd->base.data[hnd->name_offset]);
+ snprintf(name, descriptor->name.size() + 1, "%s", descriptor->name.c_str());
id = drv_bo_get_plane_handle(bo, 0).u32;
- auto buffer = new cros_gralloc_buffer(id, bo, hnd);
+ auto buffer = new cros_gralloc_buffer(id, bo, hnd, hnd->fds[hnd->num_planes],
+ hnd->reserved_region_size);
std::lock_guard<std::mutex> lock(mutex_);
buffers_.emplace(id, buffer);
handles_.emplace(hnd, std::make_pair(buffer, 1));
- *out_handle = &hnd->base;
+ *out_handle = reinterpret_cast<buffer_handle_t>(hnd);
return 0;
}
@@ -208,18 +298,17 @@ int32_t cros_gralloc_driver::retain(buffer_handle_t handle)
struct bo *bo;
struct drv_import_fd_data data;
data.format = hnd->format;
+ data.tiling = hnd->tiling;
+
data.width = hnd->width;
data.height = hnd->height;
- data.use_flags = static_cast<uint64_t>(hnd->use_flags[0]) << 32;
- data.use_flags |= hnd->use_flags[1];
+ data.use_flags = hnd->use_flags;
memcpy(data.fds, hnd->fds, sizeof(data.fds));
memcpy(data.strides, hnd->strides, sizeof(data.strides));
memcpy(data.offsets, hnd->offsets, sizeof(data.offsets));
for (uint32_t plane = 0; plane < DRV_MAX_PLANES; plane++) {
- data.format_modifiers[plane] =
- static_cast<uint64_t>(hnd->format_modifiers[2 * plane]) << 32;
- data.format_modifiers[plane] |= hnd->format_modifiers[2 * plane + 1];
+ data.format_modifiers[plane] = hnd->format_modifier;
}
bo = drv_bo_import(drv_, &data);
@@ -228,7 +317,8 @@ int32_t cros_gralloc_driver::retain(buffer_handle_t handle)
id = drv_bo_get_plane_handle(bo, 0).u32;
- buffer = new cros_gralloc_buffer(id, bo, nullptr);
+ buffer = new cros_gralloc_buffer(id, bo, nullptr, hnd->fds[hnd->num_planes],
+ hnd->reserved_region_size);
buffers_.emplace(id, buffer);
}
@@ -264,10 +354,10 @@ int32_t cros_gralloc_driver::release(buffer_handle_t handle)
}
int32_t cros_gralloc_driver::lock(buffer_handle_t handle, int32_t acquire_fence,
- const struct rectangle *rect, uint32_t map_flags,
- uint8_t *addr[DRV_MAX_PLANES])
+ bool close_acquire_fence, const struct rectangle *rect,
+ uint32_t map_flags, uint8_t *addr[DRV_MAX_PLANES])
{
- int32_t ret = cros_gralloc_sync_wait(acquire_fence);
+ int32_t ret = cros_gralloc_sync_wait(acquire_fence, close_acquire_fence);
if (ret)
return ret;
@@ -313,6 +403,51 @@ int32_t cros_gralloc_driver::unlock(buffer_handle_t handle, int32_t *release_fen
return buffer->unlock();
}
+int32_t cros_gralloc_driver::invalidate(buffer_handle_t handle)
+{
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (!buffer) {
+ drv_log("Invalid Reference.\n");
+ return -EINVAL;
+ }
+
+ return buffer->invalidate();
+}
+
+int32_t cros_gralloc_driver::flush(buffer_handle_t handle, int32_t *release_fence)
+{
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (!buffer) {
+ drv_log("Invalid Reference.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * From the ANativeWindow::dequeueBuffer documentation:
+ *
+ * "A value of -1 indicates that the caller may access the buffer immediately without
+ * waiting on a fence."
+ */
+ *release_fence = -1;
+ return buffer->flush();
+}
+
int32_t cros_gralloc_driver::get_backing_store(buffer_handle_t handle, uint64_t *out_store)
{
std::lock_guard<std::mutex> lock(mutex_);
@@ -353,6 +488,32 @@ int32_t cros_gralloc_driver::resource_info(buffer_handle_t handle, uint32_t stri
return buffer->resource_info(strides, offsets);
}
+int32_t cros_gralloc_driver::get_reserved_region(buffer_handle_t handle,
+ void **reserved_region_addr,
+ uint64_t *reserved_region_size)
+{
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (!buffer) {
+ drv_log("Invalid Reference.\n");
+ return -EINVAL;
+ }
+
+ return buffer->get_reserved_region(reserved_region_addr, reserved_region_size);
+}
+
+uint32_t cros_gralloc_driver::get_resolved_drm_format(uint32_t drm_format, uint64_t usage)
+{
+ return drv_resolve_format(drv_, drm_format, usage);
+}
+
cros_gralloc_buffer *cros_gralloc_driver::get_buffer(cros_gralloc_handle_t hnd)
{
/* Assumes driver mutex is held. */
@@ -361,3 +522,13 @@ cros_gralloc_buffer *cros_gralloc_driver::get_buffer(cros_gralloc_handle_t hnd)
return nullptr;
}
+
+void cros_gralloc_driver::for_each_handle(
+ const std::function<void(cros_gralloc_handle_t)> &function)
+{
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ for (const auto &pair : handles_) {
+ function(pair.first);
+ }
+}
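
The reserved region that cros_gralloc_driver::allocate() attaches to a buffer is an anonymous shared-memory file: memfd_create_wrapper() obtains the fd (falling back to the raw syscall on older toolchains) and create_reserved_region() sizes it with ftruncate(). Below is a minimal standalone sketch of the same sequence, with a hypothetical name and abbreviated error handling.

#include <cerrno>
#include <cstdint>
#include <string>
#include <sys/syscall.h>
#include <unistd.h>

// Sketch of create_reserved_region() above: create an anonymous memfd named
// after the buffer and back it with `size` bytes. Returns the fd, or -errno.
int create_region_sketch(const std::string &buffer_name, uint64_t size)
{
	std::string name = buffer_name + " reserved region";
	int fd = static_cast<int>(syscall(__NR_memfd_create, name.c_str(), 0u));
	if (fd < 0)
		return -errno;
	if (ftruncate(fd, static_cast<off_t>(size))) {
		int err = -errno;
		close(fd);
		return err;
	}
	/* The fd is stored in hnd->fds[num_planes] and mmap()ed lazily in
	 * cros_gralloc_buffer::get_reserved_region(). */
	return fd;
}
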
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h
index f0512777ce1..d444ecd93f4 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h
@@ -9,6 +9,7 @@
#include "cros_gralloc_buffer.h"
+#include <functional>
#include <mutex>
#include <unordered_map>
@@ -26,14 +27,25 @@ class cros_gralloc_driver
int32_t retain(buffer_handle_t handle);
int32_t release(buffer_handle_t handle);
- int32_t lock(buffer_handle_t handle, int32_t acquire_fence, const struct rectangle *rect,
- uint32_t map_flags, uint8_t *addr[DRV_MAX_PLANES]);
+ int32_t lock(buffer_handle_t handle, int32_t acquire_fence, bool close_acquire_fence,
+ const struct rectangle *rect, uint32_t map_flags,
+ uint8_t *addr[DRV_MAX_PLANES]);
int32_t unlock(buffer_handle_t handle, int32_t *release_fence);
+ int32_t invalidate(buffer_handle_t handle);
+ int32_t flush(buffer_handle_t handle, int32_t *release_fence);
+
int32_t get_backing_store(buffer_handle_t handle, uint64_t *out_store);
int32_t resource_info(buffer_handle_t handle, uint32_t strides[DRV_MAX_PLANES],
uint32_t offsets[DRV_MAX_PLANES]);
+ int32_t get_reserved_region(buffer_handle_t handle, void **reserved_region_addr,
+ uint64_t *reserved_region_size);
+
+ uint32_t get_resolved_drm_format(uint32_t drm_format, uint64_t usage);
+
+ void for_each_handle(const std::function<void(cros_gralloc_handle_t)> &function);
+
private:
cros_gralloc_driver(cros_gralloc_driver const &);
cros_gralloc_driver operator=(cros_gralloc_driver const &);
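
The new invalidate()/flush() entry points pair with lock()/unlock() for cache maintenance: invalidate() pulls device writes into the CPU-visible mapping before reading, and flush() pushes CPU writes back before device access. A minimal sketch of the intended call order against the API above, assuming the headers in this diff; the rectangle and map flags are illustrative values.

#include "cros_gralloc/cros_gralloc_driver.h"

// Sketch: CPU access to a buffer with explicit cache maintenance.
int32_t cpu_access_sketch(cros_gralloc_driver *driver, buffer_handle_t handle)
{
	uint8_t *addr[DRV_MAX_PLANES];
	struct rectangle rect = { 0, 0, 64, 64 };
	int32_t release_fence = -1;

	int32_t ret = driver->lock(handle, /*acquire_fence=*/-1,
				   /*close_acquire_fence=*/false, &rect,
				   BO_MAP_READ | BO_MAP_WRITE, addr);
	if (ret)
		return ret;

	driver->invalidate(handle); /* make device writes visible before reading */
	/* ... CPU reads/writes through addr[plane] ... */
	driver->flush(handle, &release_fence); /* make CPU writes visible again */

	return driver->unlock(handle, &release_fence);
}
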
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_handle.h b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_handle.h
index cd3edfe7680..b5525d1ed36 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_handle.h
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_handle.h
@@ -11,27 +11,41 @@
#include <cutils/native_handle.h>
#define DRV_MAX_PLANES 4
-
-/*
- * Only use 32-bit integers in the handle. This guarantees that the handle is
- * densely packed (i.e, the compiler does not insert any padding).
- */
+#define DRV_MAX_FDS (DRV_MAX_PLANES + 1)
struct cros_gralloc_handle {
native_handle_t base;
- int32_t fds[DRV_MAX_PLANES];
+ /*
+ * File descriptors must immediately follow the native_handle_t base and used file
+ * descriptors must be packed at the beginning of this array to work with
+ * native_handle_clone().
+ *
+ * This field contains 'num_planes' plane file descriptors followed by an optional metadata
+ * reserved region file descriptor if 'reserved_region_size' is greater than zero.
+ */
+ int32_t fds[DRV_MAX_FDS];
uint32_t strides[DRV_MAX_PLANES];
uint32_t offsets[DRV_MAX_PLANES];
- uint32_t format_modifiers[2 * DRV_MAX_PLANES];
+ uint32_t sizes[DRV_MAX_PLANES];
+ uint32_t id;
uint32_t width;
uint32_t height;
- uint32_t format; /* DRM format */
- uint32_t use_flags[2]; /* Buffer creation flags */
+ uint32_t format; /* DRM format */
+ uint32_t tiling;
+ uint64_t format_modifier;
+ uint64_t use_flags; /* Buffer creation flags */
uint32_t magic;
uint32_t pixel_stride;
int32_t droid_format;
int32_t usage; /* Android usage. */
-};
+ uint32_t num_planes;
+ uint64_t reserved_region_size;
+ uint64_t total_size; /* Total allocation size */
+ /*
+ * Name is a null terminated char array located at handle->base.data[handle->name_offset].
+ */
+ uint32_t name_offset;
+} __attribute__((packed));
typedef const struct cros_gralloc_handle *cros_gralloc_handle_t;
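
With the packed layout above, the variable-length pieces of the handle are recovered by index rather than by fixed struct fields: the name lives in the handle's int tail at name_offset, and the optional reserved-region fd directly follows the plane fds. A sketch with hypothetical accessor names, mirroring how the allocator writes these fields:

#include "cros_gralloc/cros_gralloc_handle.h"

// The allocator stores the name at &hnd->base.data[hnd->name_offset].
const char *handle_name(cros_gralloc_handle_t hnd)
{
	return reinterpret_cast<const char *>(&hnd->base.data[hnd->name_offset]);
}

// -1 when the buffer was allocated without a reserved region.
int32_t handle_reserved_region_fd(cros_gralloc_handle_t hnd)
{
	return hnd->fds[hnd->num_planes];
}
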
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc
index 73e59cb5700..43199367929 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc
@@ -20,6 +20,8 @@ uint32_t cros_gralloc_convert_format(int format)
return DRM_FORMAT_ARGB8888;
case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
return DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED;
+ case HAL_PIXEL_FORMAT_RAW16:
+ return DRM_FORMAT_R16;
case HAL_PIXEL_FORMAT_RGB_565:
return DRM_FORMAT_RGB565;
case HAL_PIXEL_FORMAT_RGB_888:
@@ -59,30 +61,39 @@ cros_gralloc_handle_t cros_gralloc_convert_handle(buffer_handle_t handle)
return hnd;
}
-int32_t cros_gralloc_sync_wait(int32_t acquire_fence)
+int32_t cros_gralloc_sync_wait(int32_t fence, bool close_fence)
{
- if (acquire_fence < 0)
+ if (fence < 0)
return 0;
/*
* Wait initially for 1000 ms, and then wait indefinitely. The SYNC_IOC_WAIT
* documentation states the caller waits indefinitely on the fence if timeout < 0.
*/
- int err = sync_wait(acquire_fence, 1000);
+ int err = sync_wait(fence, 1000);
if (err < 0) {
drv_log("Timed out on sync wait, err = %s\n", strerror(errno));
- err = sync_wait(acquire_fence, -1);
+ err = sync_wait(fence, -1);
if (err < 0) {
drv_log("sync wait error = %s\n", strerror(errno));
return -errno;
}
}
- err = close(acquire_fence);
- if (err) {
- drv_log("Unable to close fence fd, err = %s\n", strerror(errno));
- return -errno;
+ if (close_fence) {
+ err = close(fence);
+ if (err) {
+ drv_log("Unable to close fence fd, err = %s\n", strerror(errno));
+ return -errno;
+ }
}
return 0;
}
+
+std::string get_drm_format_string(uint32_t drm_format)
+{
+ char *sequence = (char *)&drm_format;
+ std::string s(sequence, 4);
+ return "DRM_FOURCC_" + s;
+}
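
get_drm_format_string() works because a DRM fourcc is four packed ASCII bytes, so reinterpreting the dword (on a little-endian machine) yields the readable code. A standalone equivalent for illustration:

#include <cstdint>
#include <cstdio>
#include <string>

// Same idea as get_drm_format_string() above, assuming little-endian layout.
static std::string fourcc_string(uint32_t drm_format)
{
	const char *bytes = reinterpret_cast<const char *>(&drm_format);
	return "DRM_FOURCC_" + std::string(bytes, 4);
}

int main()
{
	uint32_t nv12 = 'N' | ('V' << 8) | ('1' << 16) | ('2' << 24); /* DRM_FORMAT_NV12 */
	printf("%s\n", fourcc_string(nv12).c_str()); /* prints DRM_FOURCC_NV12 */
	return 0;
}
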
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h
index a55eebcada4..a43833d6c3f 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h
@@ -22,6 +22,8 @@ uint32_t cros_gralloc_convert_format(int32_t format);
cros_gralloc_handle_t cros_gralloc_convert_handle(buffer_handle_t handle);
-int32_t cros_gralloc_sync_wait(int32_t acquire_fence);
+int32_t cros_gralloc_sync_wait(int32_t fence, bool close_fence);
+
+std::string get_drm_format_string(uint32_t drm_format);
#endif
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_types.h b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_types.h
index 1fa81de5b67..22f58e2c4c8 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_types.h
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_types.h
@@ -7,14 +7,17 @@
#ifndef CROS_GRALLOC_TYPES_H
#define CROS_GRALLOC_TYPES_H
+#include <string>
+
struct cros_gralloc_buffer_descriptor {
uint32_t width;
uint32_t height;
- uint32_t consumer_usage;
- uint32_t producer_usage;
- uint32_t droid_format;
+ int32_t droid_format;
+ int32_t droid_usage;
uint32_t drm_format;
uint64_t use_flags;
+ uint64_t reserved_region_size;
+ std::string name;
};
#endif
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc
index 6c49d3ae3e1..3a08724ec3e 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc
@@ -19,6 +19,15 @@ struct gralloc0_module {
std::mutex initialization_mutex;
};
+struct cros_gralloc0_buffer_info {
+ uint32_t drm_fourcc;
+ int num_fds;
+ int fds[4];
+ uint64_t modifier;
+ uint32_t offset[4];
+ uint32_t stride[4];
+};
+
/* This enumeration must match the one in <gralloc_drm.h>.
* The functions supported by this gralloc's temporary private API are listed
* below. Use of these functions is highly discouraged and should only be
@@ -31,6 +40,7 @@ enum {
GRALLOC_DRM_GET_FORMAT,
GRALLOC_DRM_GET_DIMENSIONS,
GRALLOC_DRM_GET_BACKING_STORE,
+ GRALLOC_DRM_GET_BUFFER_INFO,
};
// clang-format on
@@ -70,8 +80,9 @@ static uint64_t gralloc0_convert_usage(int usage)
* rockchip) and usb monitors (evdi/udl). It's complicated so ignore it.
* */
use_flags |= BO_USE_NONE;
+ /* Map this flag to linear until real HW protection is available on Android. */
if (usage & GRALLOC_USAGE_PROTECTED)
- use_flags |= BO_USE_PROTECTED;
+ use_flags |= BO_USE_LINEAR;
if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) {
use_flags |= BO_USE_HW_VIDEO_ENCODER;
/*HACK: See b/30054495 */
@@ -119,9 +130,10 @@ static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usa
descriptor.width = w;
descriptor.height = h;
descriptor.droid_format = format;
- descriptor.producer_usage = descriptor.consumer_usage = usage;
+ descriptor.droid_usage = usage;
descriptor.drm_format = cros_gralloc_convert_format(format);
descriptor.use_flags = gralloc0_convert_usage(usage);
+ descriptor.reserved_region_size = 0;
supported = mod->driver->is_supported(&descriptor);
if (!supported && (usage & GRALLOC_USAGE_HW_COMPOSER)) {
@@ -129,11 +141,13 @@ static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usa
supported = mod->driver->is_supported(&descriptor);
}
if (!supported && (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) &&
- !gralloc0_droid_yuv_format(format)) {
- // Unmask BO_USE_HW_VIDEO_ENCODER in the case of non-yuv formats
- // because they are not input to a hw encoder but used as an
- // intermediate format (e.g. camera).
+ format != HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ // Unmask BO_USE_HW_VIDEO_ENCODER for other formats. They are mostly
+ // intermediate formats not passed directly to the encoder (e.g.
+ // camera). YV12 is passed to the encoder component, but it is converted
+ // to YCbCr_420_888 before being passed to the hw encoder.
descriptor.use_flags &= ~BO_USE_HW_VIDEO_ENCODER;
+ drv_log("Retrying format %u allocation without encoder flag", format);
supported = mod->driver->is_supported(&descriptor);
}
@@ -248,7 +262,7 @@ static int gralloc0_unlock(struct gralloc_module_t const *module, buffer_handle_
if (ret)
return ret;
- ret = cros_gralloc_sync_wait(fence_fd);
+ ret = cros_gralloc_sync_wait(fence_fd, /*close_acquire_fence=*/true);
if (ret)
return ret;
@@ -264,6 +278,7 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
uint32_t *out_width, *out_height, *out_stride;
uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
+ struct cros_gralloc0_buffer_info *info;
auto mod = (struct gralloc0_module const *)module;
switch (op) {
@@ -271,6 +286,7 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
case GRALLOC_DRM_GET_FORMAT:
case GRALLOC_DRM_GET_DIMENSIONS:
case GRALLOC_DRM_GET_BACKING_STORE:
+ case GRALLOC_DRM_GET_BUFFER_INFO:
break;
default:
return -EINVAL;
@@ -315,6 +331,17 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
out_store = va_arg(args, uint64_t *);
ret = mod->driver->get_backing_store(handle, out_store);
break;
+ case GRALLOC_DRM_GET_BUFFER_INFO:
+ info = va_arg(args, struct cros_gralloc0_buffer_info *);
+ info->drm_fourcc = hnd->format;
+ info->num_fds = hnd->num_planes;
+ info->modifier = hnd->format_modifier;
+ for (uint32_t i = 0; i < hnd->num_planes; i++) {
+ info->fds[i] = hnd->fds[i];
+ info->offset[i] = hnd->offsets[i];
+ info->stride[i] = hnd->strides[i];
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -359,7 +386,7 @@ static int gralloc0_lock_async(struct gralloc_module_t const *module, buffer_han
assert(h >= 0);
map_flags = gralloc0_convert_map_usage(usage);
- ret = mod->driver->lock(handle, fence_fd, &rect, map_flags, addr);
+ ret = mod->driver->lock(handle, fence_fd, true, &rect, map_flags, addr);
*vaddr = addr[0];
return ret;
}
@@ -404,7 +431,7 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
assert(h >= 0);
map_flags = gralloc0_convert_map_usage(usage);
- ret = mod->driver->lock(handle, fence_fd, &rect, map_flags, addr);
+ ret = mod->driver->lock(handle, fence_fd, true, &rect, map_flags, addr);
if (ret)
return ret;
@@ -414,7 +441,8 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
return ret;
for (uint32_t plane = 0; plane < DRV_MAX_PLANES; plane++)
- addr[plane] = static_cast<uint8_t *>(nullptr) + offsets[plane];
+ addr[plane] =
+ reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(offsets[plane]));
}
switch (hnd->format) {
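The cast change above is a correctness fix rather than a refactor: offsetting a null pointer by a nonzero amount is undefined behavior in C++, so the per-plane placeholder addresses are now materialized from an integer instead. In isolation:

    uint32_t offset = offsets[plane];
    /* old: static_cast<uint8_t *>(nullptr) + offset  -- UB when offset != 0 */
    uint8_t *addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(offset));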
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/.clang-format b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/.clang-format
new file mode 100644
index 00000000000..534cd32d572
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/.clang-format
@@ -0,0 +1,19 @@
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This directory is formatted to match the format of the interfaces implemented.
+
+BasedOnStyle: Google
+Standard: Cpp11
+AccessModifierOffset: -2
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+IncludeBlocks: Preserve
+IndentWidth: 4
+ContinuationIndentWidth: 8
+PointerAlignment: Left
+TabWidth: 4
+UseTab: Never
\ No newline at end of file
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Allocator.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Allocator.cc
new file mode 100644
index 00000000000..57c49e9aefe
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Allocator.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc/gralloc3/CrosGralloc3Allocator.h"
+
+#include <optional>
+
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+
+#include "cros_gralloc/cros_gralloc_helpers.h"
+#include "cros_gralloc/gralloc3/CrosGralloc3Utils.h"
+
+using android::hardware::hidl_handle;
+using android::hardware::hidl_vec;
+using android::hardware::Return;
+using android::hardware::Void;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::common::V1_2::PixelFormat;
+using android::hardware::graphics::mapper::V3_0::Error;
+
+using BufferDescriptorInfo =
+ android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo;
+
+CrosGralloc3Allocator::CrosGralloc3Allocator() : mDriver(std::make_unique<cros_gralloc_driver>()) {
+ if (mDriver->init()) {
+ drv_log("Failed to initialize driver.\n");
+ mDriver = nullptr;
+ }
+}
+
+Error CrosGralloc3Allocator::allocate(const BufferDescriptorInfo& descriptor, uint32_t* outStride,
+ hidl_handle* outHandle) {
+ if (!mDriver) {
+ drv_log("Failed to allocate. Driver is uninitialized.\n");
+ return Error::NO_RESOURCES;
+ }
+
+ if (!outStride || !outHandle) {
+ return Error::NO_RESOURCES;
+ }
+
+ struct cros_gralloc_buffer_descriptor crosDescriptor;
+ if (convertToCrosDescriptor(descriptor, &crosDescriptor)) {
+ return Error::UNSUPPORTED;
+ }
+
+ bool supported = mDriver->is_supported(&crosDescriptor);
+ if (!supported && (descriptor.usage & BufferUsage::COMPOSER_OVERLAY)) {
+ crosDescriptor.use_flags &= ~BO_USE_SCANOUT;
+ supported = mDriver->is_supported(&crosDescriptor);
+ }
+
+ if (!supported) {
+ std::string drmFormatString = get_drm_format_string(crosDescriptor.drm_format);
+ std::string pixelFormatString = getPixelFormatString(descriptor.format);
+ std::string usageString = getUsageString(descriptor.usage);
+ drv_log("Unsupported combination -- pixel format: %s, drm format:%s, usage: %s\n",
+ pixelFormatString.c_str(), drmFormatString.c_str(), usageString.c_str());
+ return Error::UNSUPPORTED;
+ }
+
+ buffer_handle_t handle;
+ int ret = mDriver->allocate(&crosDescriptor, &handle);
+ if (ret) {
+ return Error::NO_RESOURCES;
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(handle);
+ if (!crosHandle) {
+ return Error::NO_RESOURCES;
+ }
+
+ *outHandle = handle;
+ *outStride = crosHandle->pixel_stride;
+
+ return Error::NONE;
+}
+
+Return<void> CrosGralloc3Allocator::allocate(const hidl_vec<uint32_t>& encoded, uint32_t count,
+ allocate_cb hidlCb) {
+ hidl_vec<hidl_handle> handles;
+
+ if (!mDriver) {
+ drv_log("Failed to allocate. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, 0, handles);
+ return Void();
+ }
+
+ auto descriptor_opt = decodeBufferDescriptorInfo(encoded);
+ if (!descriptor_opt) {
+ drv_log("Failed to allocate. Failed to decode buffer descriptor.\n");
+ hidlCb(Error::BAD_DESCRIPTOR, 0, handles);
+ return Void();
+ }
+
+ BufferDescriptorInfo descriptor = *descriptor_opt;
+
+ handles.resize(count);
+
+ uint32_t stride = 0;
+ for (int i = 0; i < handles.size(); i++) {
+ Error err = allocate(descriptor, &stride, &(handles[i]));
+ if (err != Error::NONE) {
+ for (int j = 0; j < i; j++) {
+ mDriver->release(handles[j].getNativeHandle());
+ }
+ handles.resize(0);
+ hidlCb(err, 0, handles);
+ return Void();
+ }
+ }
+
+ hidlCb(Error::NONE, stride, handles);
+
+ for (const hidl_handle& handle : handles) {
+ mDriver->release(handle.getNativeHandle());
+ }
+
+ return Void();
+}
+
+Return<void> CrosGralloc3Allocator::dumpDebugInfo(dumpDebugInfo_cb hidl_cb) {
+ hidl_cb("CrosGralloc3Allocator::dumpDebugInfo unimplemented.");
+ return Void();
+}
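Note the release loop after hidlCb: by the time the callback returns, the HIDL transport has already duped the handles for the client, so the service drops its local references. A hedged sketch of the matching client path (service discovery and error handling abbreviated; descriptor values illustrative):

    using android::sp;
    using android::hardware::hidl_handle;
    using android::hardware::hidl_vec;
    using android::hardware::graphics::allocator::V3_0::IAllocator;
    using android::hardware::graphics::common::V1_2::BufferUsage;
    using android::hardware::graphics::common::V1_2::PixelFormat;
    using android::hardware::graphics::mapper::V3_0::Error;
    using android::hardware::graphics::mapper::V3_0::IMapper;

    sp<IMapper> mapper = IMapper::getService();
    sp<IAllocator> allocator = IAllocator::getService();

    IMapper::BufferDescriptorInfo info{};
    info.width = 1280;
    info.height = 720;
    info.layerCount = 1;
    info.format = PixelFormat::RGBA_8888;
    info.usage = static_cast<uint64_t>(BufferUsage::CPU_READ_OFTEN) |
                 static_cast<uint64_t>(BufferUsage::CPU_WRITE_OFTEN);

    mapper->createDescriptor(info, [&](Error err, const hidl_vec<uint32_t>& desc) {
        if (err != Error::NONE) return;
        allocator->allocate(desc, 1, [&](Error allocErr, uint32_t /*stride*/,
                                         const hidl_vec<hidl_handle>& handles) {
            if (allocErr != Error::NONE) return;
            /* The raw handles die with this callback; importBuffer() hands the
             * client an owned copy registered with its local mapper. */
            mapper->importBuffer(handles[0], [&](Error importErr, void* imported) {
                /* ... use imported, then freeBuffer(imported) ... */
            });
        });
    });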
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Allocator.h b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Allocator.h
new file mode 100644
index 00000000000..655143c35e5
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Allocator.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <android/hardware/graphics/allocator/3.0/IAllocator.h>
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+
+#include "cros_gralloc/cros_gralloc_driver.h"
+
+class CrosGralloc3Allocator : public android::hardware::graphics::allocator::V3_0::IAllocator {
+ public:
+ CrosGralloc3Allocator();
+
+ android::hardware::Return<void> allocate(
+ const android::hardware::hidl_vec<uint32_t>& descriptor, uint32_t count,
+ allocate_cb hidl_cb) override;
+
+ android::hardware::Return<void> dumpDebugInfo(dumpDebugInfo_cb hidl_cb) override;
+
+ private:
+ android::hardware::graphics::mapper::V3_0::Error allocate(
+ const android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo&
+ description,
+ uint32_t* outStride, android::hardware::hidl_handle* outHandle);
+
+ std::unique_ptr<cros_gralloc_driver> mDriver;
+};
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3AllocatorService.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3AllocatorService.cc
new file mode 100644
index 00000000000..daab5084b4d
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3AllocatorService.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#define LOG_TAG "AllocatorService"
+
+#include <hidl/LegacySupport.h>
+
+#include "cros_gralloc/gralloc3/CrosGralloc3Allocator.h"
+
+using android::sp;
+using android::hardware::configureRpcThreadpool;
+using android::hardware::joinRpcThreadpool;
+using android::hardware::graphics::allocator::V3_0::IAllocator;
+
+int main(int, char**) {
+ sp<IAllocator> allocator = new CrosGralloc3Allocator();
+ configureRpcThreadpool(4, true /* callerWillJoin */);
+ if (allocator->registerAsService() != android::NO_ERROR) {
+ ALOGE("failed to register graphics IAllocator 3.0 service");
+ return -EINVAL;
+ }
+
+ ALOGI("graphics IAllocator 3.0 service is initialized");
+ android::hardware::joinRpcThreadpool();
+ ALOGI("graphics IAllocator 3.0 service is terminating");
+ return 0;
+}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Mapper.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Mapper.cc
new file mode 100644
index 00000000000..b1082a00748
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Mapper.cc
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc/gralloc3/CrosGralloc3Mapper.h"
+
+#include <cutils/native_handle.h>
+
+#include "cros_gralloc/cros_gralloc_helpers.h"
+#include "cros_gralloc/gralloc3/CrosGralloc3Utils.h"
+
+#include "helpers.h"
+
+using android::hardware::hidl_handle;
+using android::hardware::hidl_vec;
+using android::hardware::Return;
+using android::hardware::Void;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::common::V1_2::PixelFormat;
+using android::hardware::graphics::mapper::V3_0::Error;
+using android::hardware::graphics::mapper::V3_0::IMapper;
+using android::hardware::graphics::mapper::V3_0::YCbCrLayout;
+
+CrosGralloc3Mapper::CrosGralloc3Mapper() : mDriver(std::make_unique<cros_gralloc_driver>()) {
+ if (mDriver->init()) {
+ drv_log("Failed to initialize driver.\n");
+ mDriver = nullptr;
+ }
+}
+
+Return<void> CrosGralloc3Mapper::createDescriptor(const BufferDescriptorInfo& description,
+ createDescriptor_cb hidlCb) {
+ hidl_vec<uint32_t> descriptor;
+
+ if (description.width == 0) {
+ drv_log("Failed to createDescriptor. Bad width: %d.\n", description.width);
+ hidlCb(Error::BAD_VALUE, descriptor);
+ return Void();
+ }
+
+ if (description.height == 0) {
+ drv_log("Failed to createDescriptor. Bad height: %d.\n", description.height);
+ hidlCb(Error::BAD_VALUE, descriptor);
+ return Void();
+ }
+
+ if (description.layerCount == 0) {
+ drv_log("Failed to createDescriptor. Bad layer count: %d.\n", description.layerCount);
+ hidlCb(Error::BAD_VALUE, descriptor);
+ return Void();
+ }
+
+ auto descriptor_opt = encodeBufferDescriptorInfo(description);
+ if (!descriptor_opt) {
+ drv_log("Failed to createDescriptor. Failed to encodeBufferDescriptorInfo\n");
+ hidlCb(Error::BAD_VALUE, descriptor);
+ return Void();
+ }
+
+ descriptor = *descriptor_opt;
+ hidlCb(Error::NONE, descriptor);
+ return Void();
+}
+
+Return<void> CrosGralloc3Mapper::importBuffer(const hidl_handle& handle, importBuffer_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to import buffer. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, nullptr);
+ return Void();
+ }
+
+ const native_handle_t* bufferHandle = handle.getNativeHandle();
+ if (!bufferHandle || bufferHandle->numFds == 0) {
+ drv_log("Failed to importBuffer. Bad handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ native_handle_t* importedBufferHandle = native_handle_clone(bufferHandle);
+ if (!importedBufferHandle) {
+ drv_log("Failed to importBuffer. Handle clone failed.\n");
+ hidlCb(Error::NO_RESOURCES, nullptr);
+ return Void();
+ }
+
+ int ret = mDriver->retain(importedBufferHandle);
+ if (ret) {
+ native_handle_close(importedBufferHandle);
+ native_handle_delete(importedBufferHandle);
+ hidlCb(Error::NO_RESOURCES, nullptr);
+ return Void();
+ }
+
+ hidlCb(Error::NONE, importedBufferHandle);
+ return Void();
+}
+
+Return<Error> CrosGralloc3Mapper::freeBuffer(void* rawHandle) {
+ if (!mDriver) {
+ drv_log("Failed to freeBuffer. Driver is uninitialized.\n");
+ return Error::NO_RESOURCES;
+ }
+
+ native_handle_t* bufferHandle = reinterpret_cast<native_handle_t*>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to freeBuffer. Empty handle.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ int ret = mDriver->release(bufferHandle);
+ if (ret) {
+ drv_log("Failed to freeBuffer.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ native_handle_close(bufferHandle);
+ native_handle_delete(bufferHandle);
+ return Error::NONE;
+}
+
+Return<Error> CrosGralloc3Mapper::validateBufferSize(void* rawHandle,
+ const BufferDescriptorInfo& descriptor,
+ uint32_t stride) {
+ if (!mDriver) {
+ drv_log("Failed to validateBufferSize. Driver is uninitialized.\n");
+ return Error::NO_RESOURCES;
+ }
+
+ native_handle_t* bufferHandle = reinterpret_cast<native_handle_t*>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to validateBufferSize. Empty handle.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+ if (!crosHandle) {
+ drv_log("Failed to validateBufferSize. Invalid handle.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ PixelFormat crosHandleFormat = static_cast<PixelFormat>(crosHandle->droid_format);
+ if (descriptor.format != crosHandleFormat) {
+ drv_log("Failed to validateBufferSize. Format mismatch.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ if (descriptor.width != crosHandle->width) {
+ drv_log("Failed to validateBufferSize. Width mismatch (%d vs %d).\n", descriptor.width,
+ crosHandle->width);
+ return Error::BAD_VALUE;
+ }
+
+ if (descriptor.height != crosHandle->height) {
+ drv_log("Failed to validateBufferSize. Height mismatch (%d vs %d).\n", descriptor.height,
+ crosHandle->height);
+ return Error::BAD_VALUE;
+ }
+
+ if (stride != crosHandle->pixel_stride) {
+ drv_log("Failed to validateBufferSize. Stride mismatch (%d vs %d).\n", stride,
+ crosHandle->pixel_stride);
+ return Error::BAD_VALUE;
+ }
+
+ return Error::NONE;
+}
+
+Return<void> CrosGralloc3Mapper::getTransportSize(void* rawHandle, getTransportSize_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to getTransportSize. Driver is uninitialized.\n");
+ hidlCb(Error::BAD_BUFFER, 0, 0);
+ return Void();
+ }
+
+ native_handle_t* bufferHandle = reinterpret_cast<native_handle_t*>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to getTransportSize. Bad handle.\n");
+ hidlCb(Error::BAD_BUFFER, 0, 0);
+ return Void();
+ }
+
+ // No local process data is currently stored on the native handle.
+ hidlCb(Error::NONE, bufferHandle->numFds, bufferHandle->numInts);
+ return Void();
+}
+
+Return<void> CrosGralloc3Mapper::lock(void* rawHandle, uint64_t cpuUsage, const Rect& accessRegion,
+ const hidl_handle& acquireFence, lock_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to lock. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, nullptr, 0, 0);
+ return Void();
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to lock. Empty handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr, 0, 0);
+ return Void();
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+ if (crosHandle == nullptr) {
+ drv_log("Failed to lock. Invalid handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr, 0, 0);
+ return Void();
+ }
+
+ LockResult result = lockInternal(crosHandle, cpuUsage, accessRegion, acquireFence);
+ if (result.error != Error::NONE) {
+ drv_log("Failed to lock. Failed to lockInternal.\n");
+ hidlCb(result.error, nullptr, 0, 0);
+ return Void();
+ }
+
+ int32_t bytesPerPixel = drv_bytes_per_pixel_from_format(crosHandle->format, 0);
+ int32_t bytesPerStride = static_cast<int32_t>(crosHandle->strides[0]);
+
+ hidlCb(Error::NONE, result.mapped[0], bytesPerPixel, bytesPerStride);
+ return Void();
+}
+
+Return<void> CrosGralloc3Mapper::lockYCbCr(void* rawHandle, uint64_t cpuUsage,
+ const Rect& accessRegion,
+ const android::hardware::hidl_handle& acquireFence,
+ lockYCbCr_cb hidlCb) {
+ YCbCrLayout ycbcr = {};
+
+ if (!mDriver) {
+ drv_log("Failed to lock. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, ycbcr);
+ return Void();
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to lockYCbCr. Empty handle.\n");
+ hidlCb(Error::BAD_BUFFER, ycbcr);
+ return Void();
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+ if (crosHandle == nullptr) {
+ drv_log("Failed to lockYCbCr. Invalid handle.\n");
+ hidlCb(Error::BAD_BUFFER, ycbcr);
+ return Void();
+ }
+
+ LockResult result = lockInternal(crosHandle, cpuUsage, accessRegion, acquireFence);
+ if (result.error != Error::NONE) {
+ drv_log("Failed to lockYCbCr. Failed to lockInternal.\n");
+ hidlCb(result.error, ycbcr);
+ return Void();
+ }
+
+ switch (crosHandle->format) {
+ case DRM_FORMAT_NV12: {
+ ycbcr.y = result.mapped[0] + crosHandle->offsets[0];
+ ycbcr.cb = result.mapped[0] + crosHandle->offsets[1];
+ ycbcr.cr = result.mapped[0] + crosHandle->offsets[1] + 1;
+ ycbcr.yStride = crosHandle->strides[0];
+ ycbcr.cStride = crosHandle->strides[1];
+ ycbcr.chromaStep = 2;
+ break;
+ }
+ case DRM_FORMAT_NV21: {
+ ycbcr.y = result.mapped[0] + crosHandle->offsets[0];
+ ycbcr.cb = result.mapped[0] + crosHandle->offsets[1] + 1;
+ ycbcr.cr = result.mapped[0] + crosHandle->offsets[1];
+ ycbcr.yStride = crosHandle->strides[0];
+ ycbcr.cStride = crosHandle->strides[1];
+ ycbcr.chromaStep = 2;
+ break;
+ }
+ case DRM_FORMAT_YVU420: {
+ ycbcr.y = result.mapped[0] + crosHandle->offsets[0];
+ ycbcr.cb = result.mapped[0] + crosHandle->offsets[1];
+ ycbcr.cr = result.mapped[0] + crosHandle->offsets[2];
+ ycbcr.yStride = crosHandle->strides[0];
+ ycbcr.cStride = crosHandle->strides[1];
+ ycbcr.chromaStep = 1;
+ break;
+ }
+ case DRM_FORMAT_YVU420_ANDROID: {
+ ycbcr.y = result.mapped[0] + crosHandle->offsets[0];
+ ycbcr.cb = result.mapped[0] + crosHandle->offsets[2];
+ ycbcr.cr = result.mapped[0] + crosHandle->offsets[1];
+ ycbcr.yStride = crosHandle->strides[0];
+ ycbcr.cStride = crosHandle->strides[1];
+ ycbcr.chromaStep = 1;
+ break;
+ }
+ default: {
+ std::string format = get_drm_format_string(crosHandle->format);
+ drv_log("Failed to lockYCbCr. Unhandled format: %s\n", format.c_str());
+ hidlCb(Error::BAD_BUFFER, ycbcr);
+ return Void();
+ }
+ }
+
+ hidlCb(Error::NONE, ycbcr);
+ return Void();
+}
+
+CrosGralloc3Mapper::LockResult CrosGralloc3Mapper::lockInternal(
+ cros_gralloc_handle_t crosHandle, uint64_t cpuUsage, const Rect& region,
+ const android::hardware::hidl_handle& acquireFence) {
+ LockResult result = {};
+
+ if (!mDriver) {
+ drv_log("Failed to lock. Driver is uninitialized.\n");
+ result.error = Error::NO_RESOURCES;
+ return result;
+ }
+
+ if (cpuUsage == 0) {
+ drv_log("Failed to lock. Bad cpu usage: %" PRIu64 ".\n", cpuUsage);
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ uint32_t mapUsage = 0;
+ int ret = convertToMapUsage(cpuUsage, &mapUsage);
+ if (ret) {
+ drv_log("Failed to lock. Convert usage failed.\n");
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ if (region.left < 0) {
+ drv_log("Failed to lock. Invalid region: negative left value %d.\n", region.left);
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ if (region.top < 0) {
+ drv_log("Failed to lock. Invalid region: negative top value %d.\n", region.top);
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ if (region.width < 0) {
+ drv_log("Failed to lock. Invalid region: negative width value %d.\n", region.width);
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ if (region.height < 0) {
+ drv_log("Failed to lock. Invalid region: negative height value %d.\n", region.height);
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ if (region.width > crosHandle->width) {
+ drv_log("Failed to lock. Invalid region: width greater than buffer width (%d vs %d).\n",
+ region.width, crosHandle->width);
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ if (region.height > crosHandle->height) {
+ drv_log("Failed to lock. Invalid region: height greater than buffer height (%d vs %d).\n",
+ region.height, crosHandle->height);
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ struct rectangle rect = {static_cast<uint32_t>(region.left), static_cast<uint32_t>(region.top),
+ static_cast<uint32_t>(region.width),
+ static_cast<uint32_t>(region.height)};
+
+ // An access region of all zeros means the entire buffer.
+ if (rect.x == 0 && rect.y == 0 && rect.width == 0 && rect.height == 0) {
+ rect.width = crosHandle->width;
+ rect.height = crosHandle->height;
+ }
+
+ int acquireFenceFd = -1;
+ ret = convertToFenceFd(acquireFence, &acquireFenceFd);
+ if (ret) {
+ drv_log("Failed to lock. Bad acquire fence.\n");
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(crosHandle);
+ ret = mDriver->lock(bufferHandle, acquireFenceFd, false, &rect, mapUsage, result.mapped);
+ if (ret) {
+ result.error = Error::BAD_VALUE;
+ return result;
+ }
+
+ result.error = Error::NONE;
+ return result;
+}
+
+Return<void> CrosGralloc3Mapper::unlock(void* rawHandle, unlock_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to unlock. Driver is uninitialized.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to unlock. Empty handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ int releaseFenceFd = -1;
+ int ret = mDriver->unlock(bufferHandle, &releaseFenceFd);
+ if (ret) {
+ drv_log("Failed to unlock.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ hidl_handle releaseFenceHandle;
+ ret = convertToFenceHandle(releaseFenceFd, &releaseFenceHandle);
+ if (ret) {
+ drv_log("Failed to unlock. Failed to convert release fence to handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ hidlCb(Error::NONE, releaseFenceHandle);
+ return Void();
+}
+
+Return<void> CrosGralloc3Mapper::isSupported(const BufferDescriptorInfo& descriptor,
+ isSupported_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to isSupported. Driver is uninitialized.\n");
+ hidlCb(Error::BAD_VALUE, false);
+ return Void();
+ }
+
+ struct cros_gralloc_buffer_descriptor crosDescriptor;
+ if (convertToCrosDescriptor(descriptor, &crosDescriptor)) {
+ hidlCb(Error::NONE, false);
+ return Void();
+ }
+
+ bool supported = mDriver->is_supported(&crosDescriptor);
+ if (!supported) {
+ crosDescriptor.use_flags &= ~BO_USE_SCANOUT;
+ supported = mDriver->is_supported(&crosDescriptor);
+ }
+
+ hidlCb(Error::NONE, supported);
+ return Void();
+}
+
+int CrosGralloc3Mapper::getResolvedDrmFormat(PixelFormat pixelFormat, uint64_t bufferUsage,
+ uint32_t* outDrmFormat) {
+ uint32_t drmFormat;
+ if (convertToDrmFormat(pixelFormat, &drmFormat)) {
+ std::string pixelFormatString = getPixelFormatString(pixelFormat);
+ drv_log("Failed to getResolvedDrmFormat. Failed to convert format %s\n",
+ pixelFormatString.c_str());
+ return -1;
+ }
+
+ uint64_t usage;
+ if (convertToBufferUsage(bufferUsage, &usage)) {
+ std::string usageString = getUsageString(bufferUsage);
+ drv_log("Failed to getResolvedDrmFormat. Failed to convert usage %s\n",
+ usageString.c_str());
+ return -1;
+ }
+
+ uint32_t resolvedDrmFormat = mDriver->get_resolved_drm_format(drmFormat, usage);
+ if (resolvedDrmFormat == DRM_FORMAT_INVALID) {
+ std::string drmFormatString = get_drm_format_string(drmFormat);
+ drv_log("Failed to getResolvedDrmFormat. Failed to resolve drm format %s\n",
+ drmFormatString.c_str());
+ return -1;
+ }
+
+ *outDrmFormat = resolvedDrmFormat;
+
+ return 0;
+}
+
+android::hardware::graphics::mapper::V3_0::IMapper* HIDL_FETCH_IMapper(const char* /*name*/) {
+ return static_cast<android::hardware::graphics::mapper::V3_0::IMapper*>(new CrosGralloc3Mapper);
+}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Mapper.h b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Mapper.h
new file mode 100644
index 00000000000..7ec92d50b05
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Mapper.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+
+#include <optional>
+
+#include "cros_gralloc/cros_gralloc_driver.h"
+#include "cros_gralloc/cros_gralloc_handle.h"
+
+class CrosGralloc3Mapper : public android::hardware::graphics::mapper::V3_0::IMapper {
+ public:
+ CrosGralloc3Mapper();
+
+ android::hardware::Return<void> createDescriptor(const BufferDescriptorInfo& description,
+ createDescriptor_cb hidlCb) override;
+
+ android::hardware::Return<void> importBuffer(const android::hardware::hidl_handle& rawHandle,
+ importBuffer_cb hidlCb) override;
+
+ android::hardware::Return<android::hardware::graphics::mapper::V3_0::Error> freeBuffer(
+ void* rawHandle) override;
+
+ android::hardware::Return<android::hardware::graphics::mapper::V3_0::Error> validateBufferSize(
+ void* rawHandle, const BufferDescriptorInfo& descriptor, uint32_t stride) override;
+
+ android::hardware::Return<void> getTransportSize(void* rawHandle,
+ getTransportSize_cb hidlCb) override;
+
+ android::hardware::Return<void> lock(void* rawHandle, uint64_t cpuUsage,
+ const Rect& accessRegion,
+ const android::hardware::hidl_handle& acquireFence,
+ lock_cb hidlCb) override;
+
+ android::hardware::Return<void> lockYCbCr(void* rawHandle, uint64_t cpuUsage,
+ const Rect& accessRegion,
+ const android::hardware::hidl_handle& acquireFence,
+ lockYCbCr_cb _hidl_cb) override;
+
+ android::hardware::Return<void> unlock(void* rawHandle, unlock_cb hidlCb) override;
+
+ android::hardware::Return<void> isSupported(const BufferDescriptorInfo& descriptor,
+ isSupported_cb hidlCb) override;
+
+ private:
+ int getResolvedDrmFormat(android::hardware::graphics::common::V1_2::PixelFormat pixelFormat,
+ uint64_t bufferUsage, uint32_t* outDrmFormat);
+
+ struct LockResult {
+ android::hardware::graphics::mapper::V3_0::Error error;
+
+ uint8_t* mapped[DRV_MAX_PLANES];
+ };
+ LockResult lockInternal(cros_gralloc_handle_t crosHandle, uint64_t cpuUsage,
+ const Rect& accessRegion,
+ const android::hardware::hidl_handle& acquireFence);
+
+ std::unique_ptr<cros_gralloc_driver> mDriver;
+};
+
+extern "C" android::hardware::graphics::mapper::V3_0::IMapper* HIDL_FETCH_IMapper(const char* name);
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Utils.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Utils.cc
new file mode 100644
index 00000000000..493fc1fbf45
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Utils.cc
@@ -0,0 +1,402 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc/gralloc3/CrosGralloc3Utils.h"
+
+#include <array>
+#include <limits>
+#include <unordered_map>
+
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+#include <cutils/native_handle.h>
+
+#include "cros_gralloc/cros_gralloc_helpers.h"
+
+using android::hardware::hidl_bitfield;
+using android::hardware::hidl_handle;
+using android::hardware::hidl_vec;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::common::V1_2::PixelFormat;
+
+using BufferDescriptorInfo =
+ android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo;
+
+std::string getPixelFormatString(PixelFormat format) {
+ switch (format) {
+ case PixelFormat::BGRA_8888:
+ return "PixelFormat::BGRA_8888";
+ case PixelFormat::BLOB:
+ return "PixelFormat::BLOB";
+ case PixelFormat::DEPTH_16:
+ return "PixelFormat::DEPTH_16";
+ case PixelFormat::DEPTH_24:
+ return "PixelFormat::DEPTH_24";
+ case PixelFormat::DEPTH_24_STENCIL_8:
+ return "PixelFormat::DEPTH_24_STENCIL_8";
+ case PixelFormat::DEPTH_32F:
+ return "PixelFormat::DEPTH_32F";
+ case PixelFormat::DEPTH_32F_STENCIL_8:
+ return "PixelFormat::DEPTH_32F_STENCIL_8";
+ case PixelFormat::HSV_888:
+ return "PixelFormat::HSV_888";
+ case PixelFormat::IMPLEMENTATION_DEFINED:
+ return "PixelFormat::IMPLEMENTATION_DEFINED";
+ case PixelFormat::RAW10:
+ return "PixelFormat::RAW10";
+ case PixelFormat::RAW12:
+ return "PixelFormat::RAW12";
+ case PixelFormat::RAW16:
+ return "PixelFormat::RAW16";
+ case PixelFormat::RAW_OPAQUE:
+ return "PixelFormat::RAW_OPAQUE";
+ case PixelFormat::RGBA_1010102:
+ return "PixelFormat::RGBA_1010102";
+ case PixelFormat::RGBA_8888:
+ return "PixelFormat::RGBA_8888";
+ case PixelFormat::RGBA_FP16:
+ return "PixelFormat::RGBA_FP16";
+ case PixelFormat::RGBX_8888:
+ return "PixelFormat::RGBX_8888";
+ case PixelFormat::RGB_565:
+ return "PixelFormat::RGB_565";
+ case PixelFormat::RGB_888:
+ return "PixelFormat::RGB_888";
+ case PixelFormat::STENCIL_8:
+ return "PixelFormat::STENCIL_8";
+ case PixelFormat::Y16:
+ return "PixelFormat::Y16";
+ case PixelFormat::Y8:
+ return "PixelFormat::Y8";
+ case PixelFormat::YCBCR_420_888:
+ return "PixelFormat::YCBCR_420_888";
+ case PixelFormat::YCBCR_422_I:
+ return "PixelFormat::YCBCR_422_I";
+ case PixelFormat::YCBCR_422_SP:
+ return "PixelFormat::YCBCR_422_SP";
+ case PixelFormat::YCBCR_P010:
+ return "PixelFormat::YCBCR_P010";
+ case PixelFormat::YCRCB_420_SP:
+ return "PixelFormat::YCRCB_420_SP";
+ case PixelFormat::YV12:
+ return "PixelFormat::YV12";
+ }
+ return android::base::StringPrintf("PixelFormat::Unknown(%u)", static_cast<uint32_t>(format));
+}
+
+std::string getUsageString(hidl_bitfield<BufferUsage> bufferUsage) {
+ using Underlying = typename std::underlying_type<BufferUsage>::type;
+
+ Underlying usage = static_cast<Underlying>(bufferUsage);
+
+ std::vector<std::string> usages;
+ if (usage & BufferUsage::CAMERA_INPUT) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CAMERA_INPUT);
+ usages.push_back("BufferUsage::CAMERA_INPUT");
+ }
+ if (usage & BufferUsage::CAMERA_OUTPUT) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CAMERA_OUTPUT);
+ usages.push_back("BufferUsage::CAMERA_OUTPUT");
+ }
+ if (usage & BufferUsage::COMPOSER_CURSOR) {
+ usage &= ~static_cast<Underlying>(BufferUsage::COMPOSER_CURSOR);
+ usages.push_back("BufferUsage::COMPOSER_CURSOR");
+ }
+ if (usage & BufferUsage::COMPOSER_OVERLAY) {
+ usage &= ~static_cast<Underlying>(BufferUsage::COMPOSER_OVERLAY);
+ usages.push_back("BufferUsage::COMPOSER_OVERLAY");
+ }
+ if (usage & BufferUsage::CPU_READ_OFTEN) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_READ_OFTEN);
+ usages.push_back("BufferUsage::CPU_READ_OFTEN");
+ }
+ if (usage & BufferUsage::CPU_READ_NEVER) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_READ_NEVER);
+ usages.push_back("BufferUsage::CPU_READ_NEVER");
+ }
+ if (usage & BufferUsage::CPU_READ_RARELY) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_READ_RARELY);
+ usages.push_back("BufferUsage::CPU_READ_RARELY");
+ }
+ if (usage & BufferUsage::CPU_WRITE_NEVER) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_WRITE_NEVER);
+ usages.push_back("BufferUsage::CPU_WRITE_NEVER");
+ }
+ if (usage & BufferUsage::CPU_WRITE_OFTEN) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_WRITE_OFTEN);
+ usages.push_back("BufferUsage::CPU_WRITE_OFTEN");
+ }
+ if (usage & BufferUsage::CPU_WRITE_RARELY) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_WRITE_RARELY);
+ usages.push_back("BufferUsage::CPU_WRITE_RARELY");
+ }
+ if (usage & BufferUsage::GPU_RENDER_TARGET) {
+ usage &= ~static_cast<Underlying>(BufferUsage::GPU_RENDER_TARGET);
+ usages.push_back("BufferUsage::GPU_RENDER_TARGET");
+ }
+ if (usage & BufferUsage::GPU_TEXTURE) {
+ usage &= ~static_cast<Underlying>(BufferUsage::GPU_TEXTURE);
+ usages.push_back("BufferUsage::GPU_TEXTURE");
+ }
+ if (usage & BufferUsage::PROTECTED) {
+ usage &= ~static_cast<Underlying>(BufferUsage::PROTECTED);
+ usages.push_back("BufferUsage::PROTECTED");
+ }
+ if (usage & BufferUsage::RENDERSCRIPT) {
+ usage &= ~static_cast<Underlying>(BufferUsage::RENDERSCRIPT);
+ usages.push_back("BufferUsage::RENDERSCRIPT");
+ }
+ if (usage & BufferUsage::VIDEO_DECODER) {
+ usage &= ~static_cast<Underlying>(BufferUsage::VIDEO_DECODER);
+ usages.push_back("BufferUsage::VIDEO_DECODER");
+ }
+ if (usage & BufferUsage::VIDEO_ENCODER) {
+ usage &= ~static_cast<Underlying>(BufferUsage::VIDEO_ENCODER);
+ usages.push_back("BufferUsage::VIDEO_ENCODER");
+ }
+
+ if (usage) {
+ usages.push_back(android::base::StringPrintf("UnknownUsageBits-%" PRIu64, usage));
+ }
+
+ return android::base::Join(usages, '|');
+}
+
+int convertToDrmFormat(PixelFormat format, uint32_t* outDrmFormat) {
+ switch (format) {
+ case PixelFormat::BGRA_8888:
+ *outDrmFormat = DRM_FORMAT_ARGB8888;
+ return 0;
+ /**
+ * Choose DRM_FORMAT_R8 because <system/graphics.h> requires that buffers
+ * with format HAL_PIXEL_FORMAT_BLOB have a height of 1 and a width equal
+ * to their size in bytes.
+ */
+ case PixelFormat::BLOB:
+ *outDrmFormat = DRM_FORMAT_R8;
+ return 0;
+ case PixelFormat::DEPTH_16:
+ return -EINVAL;
+ case PixelFormat::DEPTH_24:
+ return -EINVAL;
+ case PixelFormat::DEPTH_24_STENCIL_8:
+ return -EINVAL;
+ case PixelFormat::DEPTH_32F:
+ return -EINVAL;
+ case PixelFormat::DEPTH_32F_STENCIL_8:
+ return -EINVAL;
+ case PixelFormat::HSV_888:
+ return -EINVAL;
+ case PixelFormat::IMPLEMENTATION_DEFINED:
+ *outDrmFormat = DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED;
+ return 0;
+ case PixelFormat::RAW10:
+ return -EINVAL;
+ case PixelFormat::RAW12:
+ return -EINVAL;
+ case PixelFormat::RAW16:
+ *outDrmFormat = DRM_FORMAT_R16;
+ return 0;
+ /* TODO use blob */
+ case PixelFormat::RAW_OPAQUE:
+ return -EINVAL;
+ case PixelFormat::RGBA_1010102:
+ *outDrmFormat = DRM_FORMAT_ABGR2101010;
+ return 0;
+ case PixelFormat::RGBA_8888:
+ *outDrmFormat = DRM_FORMAT_ABGR8888;
+ return 0;
+ case PixelFormat::RGBA_FP16:
+ *outDrmFormat = DRM_FORMAT_ABGR16161616F;
+ return 0;
+ case PixelFormat::RGBX_8888:
+ *outDrmFormat = DRM_FORMAT_XBGR8888;
+ return 0;
+ case PixelFormat::RGB_565:
+ *outDrmFormat = DRM_FORMAT_RGB565;
+ return 0;
+ case PixelFormat::RGB_888:
+ *outDrmFormat = DRM_FORMAT_RGB888;
+ return 0;
+ case PixelFormat::STENCIL_8:
+ return -EINVAL;
+ case PixelFormat::Y16:
+ *outDrmFormat = DRM_FORMAT_R16;
+ return 0;
+ case PixelFormat::Y8:
+ *outDrmFormat = DRM_FORMAT_R8;
+ return 0;
+ case PixelFormat::YCBCR_420_888:
+ *outDrmFormat = DRM_FORMAT_FLEX_YCbCr_420_888;
+ return 0;
+ case PixelFormat::YCBCR_422_SP:
+ return -EINVAL;
+ case PixelFormat::YCBCR_422_I:
+ return -EINVAL;
+ case PixelFormat::YCBCR_P010:
+ *outDrmFormat = DRM_FORMAT_P010;
+ return 0;
+ case PixelFormat::YCRCB_420_SP:
+ *outDrmFormat = DRM_FORMAT_NV21;
+ return 0;
+ case PixelFormat::YV12:
+ *outDrmFormat = DRM_FORMAT_YVU420_ANDROID;
+ return 0;
+ };
+ return -EINVAL;
+}
+
+int convertToBufferUsage(uint64_t grallocUsage, uint64_t* outBufferUsage) {
+ uint64_t bufferUsage = BO_USE_NONE;
+
+ if ((grallocUsage & BufferUsage::CPU_READ_MASK) ==
+ static_cast<uint64_t>(BufferUsage::CPU_READ_RARELY)) {
+ bufferUsage |= BO_USE_SW_READ_RARELY;
+ }
+ if ((grallocUsage & BufferUsage::CPU_READ_MASK) ==
+ static_cast<uint64_t>(BufferUsage::CPU_READ_OFTEN)) {
+ bufferUsage |= BO_USE_SW_READ_OFTEN;
+ }
+ if ((grallocUsage & BufferUsage::CPU_WRITE_MASK) ==
+ static_cast<uint64_t>(BufferUsage::CPU_WRITE_RARELY)) {
+ bufferUsage |= BO_USE_SW_WRITE_RARELY;
+ }
+ if ((grallocUsage & BufferUsage::CPU_WRITE_MASK) ==
+ static_cast<uint64_t>(BufferUsage::CPU_WRITE_OFTEN)) {
+ bufferUsage |= BO_USE_SW_WRITE_OFTEN;
+ }
+ if (grallocUsage & BufferUsage::GPU_TEXTURE) {
+ bufferUsage |= BO_USE_TEXTURE;
+ }
+ if (grallocUsage & BufferUsage::GPU_RENDER_TARGET) {
+ bufferUsage |= BO_USE_RENDERING;
+ }
+ if (grallocUsage & BufferUsage::COMPOSER_OVERLAY) {
+ /* HWC wants to use display hardware, but can defer to OpenGL. */
+ bufferUsage |= BO_USE_SCANOUT | BO_USE_TEXTURE;
+ }
+ /* Map this flag to linear until real HW protection is available on Android. */
+ if (grallocUsage & BufferUsage::PROTECTED) {
+ bufferUsage |= BO_USE_LINEAR;
+ }
+ if (grallocUsage & BufferUsage::COMPOSER_CURSOR) {
+ bufferUsage |= BO_USE_NONE;
+ }
+ if (grallocUsage & BufferUsage::VIDEO_ENCODER) {
+ /*HACK: See b/30054495 */
+ bufferUsage |= BO_USE_SW_READ_OFTEN;
+ }
+ if (grallocUsage & BufferUsage::CAMERA_OUTPUT) {
+ bufferUsage |= BO_USE_CAMERA_WRITE;
+ }
+ if (grallocUsage & BufferUsage::CAMERA_INPUT) {
+ bufferUsage |= BO_USE_CAMERA_READ;
+ }
+ if (grallocUsage & BufferUsage::RENDERSCRIPT) {
+ bufferUsage |= BO_USE_RENDERSCRIPT;
+ }
+ if (grallocUsage & BufferUsage::VIDEO_DECODER) {
+ bufferUsage |= BO_USE_HW_VIDEO_DECODER;
+ }
+
+ *outBufferUsage = bufferUsage;
+ return 0;
+}
+
+int convertToMapUsage(uint64_t grallocUsage, uint32_t* outMapUsage) {
+ uint32_t mapUsage = BO_MAP_NONE;
+
+ if (grallocUsage & BufferUsage::CPU_READ_MASK) {
+ mapUsage |= BO_MAP_READ;
+ }
+ if (grallocUsage & BufferUsage::CPU_WRITE_MASK) {
+ mapUsage |= BO_MAP_WRITE;
+ }
+
+ *outMapUsage = mapUsage;
+ return 0;
+}
+
+int convertToCrosDescriptor(const BufferDescriptorInfo& descriptor,
+ struct cros_gralloc_buffer_descriptor* outCrosDescriptor) {
+ outCrosDescriptor->width = descriptor.width;
+ outCrosDescriptor->height = descriptor.height;
+ outCrosDescriptor->droid_format = static_cast<int32_t>(descriptor.format);
+ outCrosDescriptor->droid_usage = descriptor.usage;
+ outCrosDescriptor->reserved_region_size = 0;
+ if (descriptor.layerCount > 1) {
+ drv_log("Failed to convert descriptor. Unsupported layerCount: %d\n",
+ descriptor.layerCount);
+ return -1;
+ }
+ if (convertToDrmFormat(descriptor.format, &outCrosDescriptor->drm_format)) {
+ std::string pixelFormatString = getPixelFormatString(descriptor.format);
+ drv_log("Failed to convert descriptor. Unsupported format %s\n", pixelFormatString.c_str());
+ return -1;
+ }
+ if (convertToBufferUsage(descriptor.usage, &outCrosDescriptor->use_flags)) {
+ std::string usageString = getUsageString(descriptor.usage);
+ drv_log("Failed to convert descriptor. Unsupported usage flags %s\n", usageString.c_str());
+ return -1;
+ }
+ return 0;
+}
+
+int convertToFenceFd(const hidl_handle& fenceHandle, int* outFenceFd) {
+ if (!outFenceFd) {
+ return -EINVAL;
+ }
+
+ const native_handle_t* nativeHandle = fenceHandle.getNativeHandle();
+ if (nativeHandle && nativeHandle->numFds > 1) {
+ return -EINVAL;
+ }
+
+ *outFenceFd = (nativeHandle && nativeHandle->numFds == 1) ? nativeHandle->data[0] : -1;
+ return 0;
+}
+
+int convertToFenceHandle(int fenceFd, hidl_handle* outFenceHandle) {
+ if (!outFenceHandle) {
+ return -EINVAL;
+ }
+ if (fenceFd < 0) {
+ return 0;
+ }
+
+ NATIVE_HANDLE_DECLARE_STORAGE(handleStorage, 1, 0);
+ auto fenceHandle = native_handle_init(handleStorage, 1, 0);
+ fenceHandle->data[0] = fenceFd;
+
+ *outFenceHandle = fenceHandle;
+ return 0;
+}
+
+std::optional<BufferDescriptorInfo> decodeBufferDescriptorInfo(const hidl_vec<uint32_t>& encoded) {
+ if (encoded.size() != 5) {
+ drv_log("Failed to decodeBufferDescriptorInfo. Invalid size: %zd.\n", encoded.size());
+ return {};
+ }
+
+ BufferDescriptorInfo descriptor;
+ descriptor.width = encoded[0];
+ descriptor.height = encoded[1];
+ descriptor.layerCount = encoded[2];
+ descriptor.format = static_cast<PixelFormat>(encoded[3]);
+ descriptor.usage = encoded[4];
+ return std::move(descriptor);
+}
+
+std::optional<hidl_vec<uint32_t>> encodeBufferDescriptorInfo(const BufferDescriptorInfo& info) {
+ hidl_vec<uint32_t> encoded;
+ encoded.resize(5);
+ encoded[0] = info.width;
+ encoded[1] = info.height;
+ encoded[2] = info.layerCount;
+ encoded[3] = static_cast<uint32_t>(info.format);
+ encoded[4] = info.usage & std::numeric_limits<uint32_t>::max();
+ return std::move(encoded);
+}
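The hand-rolled wire format above is five 32-bit words: width, height, layerCount, format, and usage. Note that encoding masks the 64-bit usage to its low 32 bits, so usage bits above bit 31 do not survive a round trip. A sketch under those assumptions:

    BufferDescriptorInfo info{};
    info.width = 640;
    info.height = 480;
    info.layerCount = 1;
    info.format = PixelFormat::RGBA_8888;
    info.usage = static_cast<uint64_t>(BufferUsage::CPU_READ_OFTEN);

    auto encoded = encodeBufferDescriptorInfo(info);    // hidl_vec of 5 words
    auto decoded = decodeBufferDescriptorInfo(*encoded);
    // decoded->usage == info.usage holds only while usage fits in 32 bits.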
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Utils.h b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Utils.h
new file mode 100644
index 00000000000..0492568d721
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/CrosGralloc3Utils.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <optional>
+#include <string>
+#include <vector>
+
+#include <android/hardware/graphics/common/1.2/types.h>
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+
+std::string getPixelFormatString(android::hardware::graphics::common::V1_2::PixelFormat format);
+
+std::string getUsageString(
+ android::hardware::hidl_bitfield<android::hardware::graphics::common::V1_2::BufferUsage>
+ usage);
+
+int convertToDrmFormat(android::hardware::graphics::common::V1_2::PixelFormat format,
+ uint32_t* outDrmFormat);
+
+int convertToBufferUsage(uint64_t grallocUsage, uint64_t* outBufferUsage);
+
+int convertToMapUsage(uint64_t grallocUsage, uint32_t* outMapUsage);
+
+int convertToCrosDescriptor(
+ const android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo& descriptor,
+ struct cros_gralloc_buffer_descriptor* outCrosDescriptor);
+
+int convertToFenceFd(const android::hardware::hidl_handle& fence_handle, int* out_fence_fd);
+
+int convertToFenceHandle(int fence_fd, android::hardware::hidl_handle* out_fence_handle);
+
+std::optional<android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo>
+decodeBufferDescriptorInfo(const android::hardware::hidl_vec<uint32_t>& encoded);
+
+std::optional<android::hardware::hidl_vec<uint32_t>> encodeBufferDescriptorInfo(
+ const android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo& info);
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/android.hardware.graphics.allocator@3.0-service.minigbm.rc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/android.hardware.graphics.allocator@3.0-service.minigbm.rc
new file mode 100644
index 00000000000..7377cee7444
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc3/android.hardware.graphics.allocator@3.0-service.minigbm.rc
@@ -0,0 +1,14 @@
+#
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+service vendor.graphics.allocator-3-0 /vendor/bin/hw/android.hardware.graphics.allocator@3.0-service.minigbm
+ interface android.hardware.graphics.allocator@3.0::IAllocator default
+ class hal animation
+ user system
+ group graphics drmrpc
+ capabilities SYS_NICE
+ onrestart restart surfaceflinger
+ writepid /dev/cpuset/system-background/tasks
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/.clang-format b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/.clang-format
new file mode 100644
index 00000000000..b310cc1f66b
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/.clang-format
@@ -0,0 +1,19 @@
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# This directory is formatted to match the format of the interfaces implemented.
+
+BasedOnStyle: Google
+Standard: Cpp11
+AccessModifierOffset: -2
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+IncludeBlocks: Preserve
+IndentWidth: 4
+ContinuationIndentWidth: 8
+PointerAlignment: Left
+TabWidth: 4
+UseTab: Never
\ No newline at end of file
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Allocator.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Allocator.cc
new file mode 100644
index 00000000000..e7e5f3a835c
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Allocator.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc/gralloc4/CrosGralloc4Allocator.h"
+
+#include <android/hardware/graphics/mapper/4.0/IMapper.h>
+#include <gralloctypes/Gralloc4.h>
+
+#include "cros_gralloc/cros_gralloc_helpers.h"
+#include "cros_gralloc/gralloc4/CrosGralloc4Utils.h"
+
+using android::hardware::hidl_handle;
+using android::hardware::hidl_vec;
+using android::hardware::Return;
+using android::hardware::Void;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::common::V1_2::PixelFormat;
+using android::hardware::graphics::mapper::V4_0::Error;
+
+using BufferDescriptorInfo =
+ android::hardware::graphics::mapper::V4_0::IMapper::BufferDescriptorInfo;
+
+CrosGralloc4Allocator::CrosGralloc4Allocator() : mDriver(std::make_unique<cros_gralloc_driver>()) {
+ if (mDriver->init()) {
+ drv_log("Failed to initialize driver.\n");
+ mDriver = nullptr;
+ }
+}
+
+Error CrosGralloc4Allocator::allocate(const BufferDescriptorInfo& descriptor, uint32_t* outStride,
+ hidl_handle* outHandle) {
+ if (!mDriver) {
+ drv_log("Failed to allocate. Driver is uninitialized.\n");
+ return Error::NO_RESOURCES;
+ }
+
+ if (!outStride || !outHandle) {
+ return Error::NO_RESOURCES;
+ }
+
+ struct cros_gralloc_buffer_descriptor crosDescriptor;
+ if (convertToCrosDescriptor(descriptor, &crosDescriptor)) {
+ return Error::UNSUPPORTED;
+ }
+
+ bool supported = mDriver->is_supported(&crosDescriptor);
+ if (!supported && (descriptor.usage & BufferUsage::COMPOSER_OVERLAY)) {
+ crosDescriptor.use_flags &= ~BO_USE_SCANOUT;
+ supported = mDriver->is_supported(&crosDescriptor);
+ }
+
+ if (!supported) {
+ std::string drmFormatString = get_drm_format_string(crosDescriptor.drm_format);
+ std::string pixelFormatString = getPixelFormatString(descriptor.format);
+ std::string usageString = getUsageString(descriptor.usage);
+ drv_log("Unsupported combination -- pixel format: %s, drm format:%s, usage: %s\n",
+ pixelFormatString.c_str(), drmFormatString.c_str(), usageString.c_str());
+ return Error::UNSUPPORTED;
+ }
+
+ buffer_handle_t handle;
+ int ret = mDriver->allocate(&crosDescriptor, &handle);
+ if (ret) {
+ return Error::NO_RESOURCES;
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(handle);
+ if (!crosHandle) {
+ return Error::NO_RESOURCES;
+ }
+
+ *outHandle = handle;
+ *outStride = crosHandle->pixel_stride;
+
+ return Error::NONE;
+}
+
+Return<void> CrosGralloc4Allocator::allocate(const hidl_vec<uint8_t>& descriptor, uint32_t count,
+ allocate_cb hidlCb) {
+ hidl_vec<hidl_handle> handles;
+
+ if (!mDriver) {
+ drv_log("Failed to allocate. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, 0, handles);
+ return Void();
+ }
+
+ BufferDescriptorInfo description;
+
+ int ret = android::gralloc4::decodeBufferDescriptorInfo(descriptor, &description);
+ if (ret) {
+ drv_log("Failed to allocate. Failed to decode buffer descriptor: %d.\n", ret);
+ hidlCb(Error::BAD_DESCRIPTOR, 0, handles);
+ return Void();
+ }
+
+ handles.resize(count);
+
+ uint32_t stride = 0;
+ for (int i = 0; i < handles.size(); i++) {
+ Error err = allocate(description, &stride, &(handles[i]));
+ if (err != Error::NONE) {
+ for (int j = 0; j < i; j++) {
+ mDriver->release(handles[j].getNativeHandle());
+ }
+ handles.resize(0);
+ hidlCb(err, 0, handles);
+ return Void();
+ }
+ }
+
+ hidlCb(Error::NONE, stride, handles);
+
+ for (const hidl_handle& handle : handles) {
+ mDriver->release(handle.getNativeHandle());
+ }
+
+ return Void();
+}
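Unlike the gralloc3 path, gralloc4 descriptors are encoded and decoded by libgralloctypes on both ends, so there is no hand-rolled wire format to keep in sync. A hedged sketch of the client half (field values illustrative; name and reservedSize follow the mapper 4.0 descriptor definition):

    using android::sp;
    using android::hardware::hidl_handle;
    using android::hardware::hidl_vec;
    using android::hardware::graphics::allocator::V4_0::IAllocator;
    using android::hardware::graphics::common::V1_2::BufferUsage;
    using android::hardware::graphics::common::V1_2::PixelFormat;
    using android::hardware::graphics::mapper::V4_0::Error;
    using V4Mapper = android::hardware::graphics::mapper::V4_0::IMapper;

    sp<IAllocator> allocator = IAllocator::getService();

    V4Mapper::BufferDescriptorInfo info{};
    info.name = "example-buffer";
    info.width = 1920;
    info.height = 1080;
    info.layerCount = 1;
    info.format = PixelFormat::RGBA_8888;
    info.usage = static_cast<uint64_t>(BufferUsage::GPU_TEXTURE);
    info.reservedSize = 0;

    hidl_vec<uint8_t> descriptor;
    if (android::gralloc4::encodeBufferDescriptorInfo(info, &descriptor) == 0) {
        allocator->allocate(descriptor, /*count=*/1,
                            [](Error err, uint32_t /*stride*/,
                               const hidl_vec<hidl_handle>& handles) { /* ... */ });
    }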
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Allocator.h b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Allocator.h
new file mode 100644
index 00000000000..21ad7ad2562
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Allocator.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <android/hardware/graphics/allocator/4.0/IAllocator.h>
+#include <android/hardware/graphics/mapper/4.0/IMapper.h>
+
+#include "cros_gralloc/cros_gralloc_driver.h"
+
+class CrosGralloc4Allocator : public android::hardware::graphics::allocator::V4_0::IAllocator {
+ public:
+ CrosGralloc4Allocator();
+
+ android::hardware::Return<void> allocate(const android::hardware::hidl_vec<uint8_t>& descriptor,
+ uint32_t count, allocate_cb hidl_cb) override;
+
+ private:
+ android::hardware::graphics::mapper::V4_0::Error allocate(
+ const android::hardware::graphics::mapper::V4_0::IMapper::BufferDescriptorInfo&
+ description,
+ uint32_t* outStride, android::hardware::hidl_handle* outHandle);
+
+ std::unique_ptr<cros_gralloc_driver> mDriver;
+};
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4AllocatorService.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4AllocatorService.cc
new file mode 100644
index 00000000000..5b798607057
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4AllocatorService.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#define LOG_TAG "AllocatorService"
+
+#include <hidl/LegacySupport.h>
+
+#include "cros_gralloc/gralloc4/CrosGralloc4Allocator.h"
+
+using android::sp;
+using android::hardware::configureRpcThreadpool;
+using android::hardware::joinRpcThreadpool;
+using android::hardware::graphics::allocator::V4_0::IAllocator;
+
+int main(int, char**) {
+ sp<IAllocator> allocator = new CrosGralloc4Allocator();
+ configureRpcThreadpool(4, true /* callerWillJoin */);
+ if (allocator->registerAsService() != android::NO_ERROR) {
+ ALOGE("failed to register graphics IAllocator 4.0 service");
+ return -EINVAL;
+ }
+
+ ALOGI("graphics IAllocator 4.0 service is initialized");
+ android::hardware::joinRpcThreadpool();
+ ALOGI("graphics IAllocator 4.0 service is terminating");
+ return 0;
+}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Mapper.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Mapper.cc
new file mode 100644
index 00000000000..0e26156e4be
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Mapper.cc
@@ -0,0 +1,1011 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc/gralloc4/CrosGralloc4Mapper.h"
+
+#include <aidl/android/hardware/graphics/common/BlendMode.h>
+#include <aidl/android/hardware/graphics/common/Dataspace.h>
+#include <aidl/android/hardware/graphics/common/PlaneLayout.h>
+#include <aidl/android/hardware/graphics/common/Rect.h>
+#include <cutils/native_handle.h>
+#include <gralloctypes/Gralloc4.h>
+
+#include "cros_gralloc/cros_gralloc_helpers.h"
+#include "cros_gralloc/gralloc4/CrosGralloc4Utils.h"
+
+#include "helpers.h"
+
+using aidl::android::hardware::graphics::common::BlendMode;
+using aidl::android::hardware::graphics::common::Dataspace;
+using aidl::android::hardware::graphics::common::PlaneLayout;
+using aidl::android::hardware::graphics::common::Rect;
+using android::hardware::hidl_handle;
+using android::hardware::hidl_vec;
+using android::hardware::Return;
+using android::hardware::Void;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::common::V1_2::PixelFormat;
+using android::hardware::graphics::mapper::V4_0::Error;
+using android::hardware::graphics::mapper::V4_0::IMapper;
+
+CrosGralloc4Mapper::CrosGralloc4Mapper() : mDriver(std::make_unique<cros_gralloc_driver>()) {
+ if (mDriver->init()) {
+ drv_log("Failed to initialize driver.\n");
+ mDriver = nullptr;
+ }
+}
+
+Return<void> CrosGralloc4Mapper::createDescriptor(const BufferDescriptorInfo& description,
+ createDescriptor_cb hidlCb) {
+ hidl_vec<uint8_t> descriptor;
+
+ if (description.width == 0) {
+ drv_log("Failed to createDescriptor. Bad width: %d.\n", description.width);
+ hidlCb(Error::BAD_VALUE, descriptor);
+ return Void();
+ }
+
+ if (description.height == 0) {
+ drv_log("Failed to createDescriptor. Bad height: %d.\n", description.height);
+ hidlCb(Error::BAD_VALUE, descriptor);
+ return Void();
+ }
+
+ if (description.layerCount == 0) {
+ drv_log("Failed to createDescriptor. Bad layer count: %d.\n", description.layerCount);
+ hidlCb(Error::BAD_VALUE, descriptor);
+ return Void();
+ }
+
+ int ret = android::gralloc4::encodeBufferDescriptorInfo(description, &descriptor);
+ if (ret) {
+ drv_log("Failed to createDescriptor. Failed to encode: %d.\n", ret);
+ hidlCb(Error::BAD_VALUE, descriptor);
+ return Void();
+ }
+
+ hidlCb(Error::NONE, descriptor);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::importBuffer(const hidl_handle& handle, importBuffer_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to import buffer. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, nullptr);
+ return Void();
+ }
+
+ const native_handle_t* bufferHandle = handle.getNativeHandle();
+ if (!bufferHandle || bufferHandle->numFds == 0) {
+ drv_log("Failed to importBuffer. Bad handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ native_handle_t* importedBufferHandle = native_handle_clone(bufferHandle);
+ if (!importedBufferHandle) {
+ drv_log("Failed to importBuffer. Handle clone failed: %s.\n", strerror(errno));
+ hidlCb(Error::NO_RESOURCES, nullptr);
+ return Void();
+ }
+
+ int ret = mDriver->retain(importedBufferHandle);
+ if (ret) {
+ native_handle_close(importedBufferHandle);
+ native_handle_delete(importedBufferHandle);
+ hidlCb(Error::NO_RESOURCES, nullptr);
+ return Void();
+ }
+
+ hidlCb(Error::NONE, importedBufferHandle);
+ return Void();
+}
+
+Return<Error> CrosGralloc4Mapper::freeBuffer(void* rawHandle) {
+ if (!mDriver) {
+ drv_log("Failed to freeBuffer. Driver is uninitialized.\n");
+ return Error::NO_RESOURCES;
+ }
+
+ native_handle_t* bufferHandle = reinterpret_cast<native_handle_t*>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to freeBuffer. Empty handle.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ int ret = mDriver->release(bufferHandle);
+ if (ret) {
+ return Error::BAD_BUFFER;
+ }
+
+ native_handle_close(bufferHandle);
+ native_handle_delete(bufferHandle);
+ return Error::NONE;
+}
+
+Return<Error> CrosGralloc4Mapper::validateBufferSize(void* rawHandle,
+ const BufferDescriptorInfo& descriptor,
+ uint32_t stride) {
+ if (!mDriver) {
+ drv_log("Failed to validateBufferSize. Driver is uninitialized.\n");
+ return Error::NO_RESOURCES;
+ }
+
+ native_handle_t* bufferHandle = reinterpret_cast<native_handle_t*>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to validateBufferSize. Empty handle.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+ if (!crosHandle) {
+ drv_log("Failed to validateBufferSize. Invalid handle.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ PixelFormat crosHandleFormat = static_cast<PixelFormat>(crosHandle->droid_format);
+ if (descriptor.format != crosHandleFormat) {
+ drv_log("Failed to validateBufferSize. Format mismatch.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ if (descriptor.width != crosHandle->width) {
+ drv_log("Failed to validateBufferSize. Width mismatch (%d vs %d).\n", descriptor.width,
+ crosHandle->width);
+ return Error::BAD_VALUE;
+ }
+
+ if (descriptor.height != crosHandle->height) {
+ drv_log("Failed to validateBufferSize. Height mismatch (%d vs %d).\n", descriptor.height,
+ crosHandle->height);
+ return Error::BAD_VALUE;
+ }
+
+ if (stride != crosHandle->pixel_stride) {
+ drv_log("Failed to validateBufferSize. Stride mismatch (%d vs %d).\n", stride,
+ crosHandle->pixel_stride);
+ return Error::BAD_VALUE;
+ }
+
+ return Error::NONE;
+}
+
+Return<void> CrosGralloc4Mapper::getTransportSize(void* rawHandle, getTransportSize_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to getTransportSize. Driver is uninitialized.\n");
+ hidlCb(Error::BAD_BUFFER, 0, 0);
+ return Void();
+ }
+
+ native_handle_t* bufferHandle = reinterpret_cast<native_handle_t*>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to getTransportSize. Bad handle.\n");
+ hidlCb(Error::BAD_BUFFER, 0, 0);
+ return Void();
+ }
+
+ // No local process data is currently stored on the native handle.
+ hidlCb(Error::NONE, bufferHandle->numFds, bufferHandle->numInts);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::lock(void* rawBuffer, uint64_t cpuUsage, const Rect& region,
+ const hidl_handle& acquireFence, lock_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to lock. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, nullptr);
+ return Void();
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawBuffer);
+ if (!bufferHandle) {
+ drv_log("Failed to lock. Empty handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ if (cpuUsage == 0) {
+ drv_log("Failed to lock. Bad cpu usage: %" PRIu64 ".\n", cpuUsage);
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ uint32_t mapUsage = 0;
+ int ret = convertToMapUsage(cpuUsage, &mapUsage);
+ if (ret) {
+ drv_log("Failed to lock. Convert usage failed.\n");
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+ if (crosHandle == nullptr) {
+ drv_log("Failed to lock. Invalid handle.\n");
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ if (region.left < 0) {
+ drv_log("Failed to lock. Invalid region: negative left value %d.\n", region.left);
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ if (region.top < 0) {
+ drv_log("Failed to lock. Invalid region: negative top value %d.\n", region.top);
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ if (region.width < 0) {
+ drv_log("Failed to lock. Invalid region: negative width value %d.\n", region.width);
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ if (region.height < 0) {
+ drv_log("Failed to lock. Invalid region: negative height value %d.\n", region.height);
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ if (region.width > crosHandle->width) {
+ drv_log("Failed to lock. Invalid region: width greater than buffer width (%d vs %d).\n",
+ region.width, crosHandle->width);
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ if (region.height > crosHandle->height) {
+ drv_log("Failed to lock. Invalid region: height greater than buffer height (%d vs %d).\n",
+ region.height, crosHandle->height);
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
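+ // The casts below are safe: every region field was validated as non-negative above.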
+ struct rectangle rect = {static_cast<uint32_t>(region.left), static_cast<uint32_t>(region.top),
+ static_cast<uint32_t>(region.width),
+ static_cast<uint32_t>(region.height)};
+
+ // An access region of all zeros means the entire buffer.
+ if (rect.x == 0 && rect.y == 0 && rect.width == 0 && rect.height == 0) {
+ rect.width = crosHandle->width;
+ rect.height = crosHandle->height;
+ }
+
+ int acquireFenceFd = -1;
+ ret = convertToFenceFd(acquireFence, &acquireFenceFd);
+ if (ret) {
+ drv_log("Failed to lock. Bad acquire fence.\n");
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
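+ // close_acquire_fence=false: the acquire fence fd remains owned by the incoming hidl_handle.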
+ uint8_t* addr[DRV_MAX_PLANES];
+ ret = mDriver->lock(bufferHandle, acquireFenceFd, /*close_acquire_fence=*/false, &rect,
+ mapUsage, addr);
+ if (ret) {
+ hidlCb(Error::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ hidlCb(Error::NONE, addr[0]);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::unlock(void* rawHandle, unlock_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to unlock. Driver is uninitialized.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to unlock. Empty handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ int releaseFenceFd = -1;
+ int ret = mDriver->unlock(bufferHandle, &releaseFenceFd);
+ if (ret) {
+ drv_log("Failed to unlock.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ hidl_handle releaseFenceHandle;
+ ret = convertToFenceHandle(releaseFenceFd, &releaseFenceHandle);
+ if (ret) {
+ drv_log("Failed to unlock. Failed to convert release fence to handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ hidlCb(Error::NONE, releaseFenceHandle);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::flushLockedBuffer(void* rawHandle, flushLockedBuffer_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to flushLockedBuffer. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, nullptr);
+ return Void();
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to flushLockedBuffer. Empty handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ int releaseFenceFd = -1;
+ int ret = mDriver->flush(bufferHandle, &releaseFenceFd);
+ if (ret) {
+ drv_log("Failed to flushLockedBuffer. Flush failed.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ hidl_handle releaseFenceHandle;
+ ret = convertToFenceHandle(releaseFenceFd, &releaseFenceHandle);
+ if (ret) {
+ drv_log("Failed to flushLockedBuffer. Failed to convert release fence to handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr);
+ return Void();
+ }
+
+ hidlCb(Error::NONE, releaseFenceHandle);
+ return Void();
+}
+
+Return<Error> CrosGralloc4Mapper::rereadLockedBuffer(void* rawHandle) {
+ if (!mDriver) {
+ drv_log("Failed to rereadLockedBuffer. Driver is uninitialized.\n");
+ return Error::NO_RESOURCES;
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to rereadLockedBuffer. Empty handle.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ int ret = mDriver->invalidate(bufferHandle);
+ if (ret) {
+ drv_log("Failed to rereadLockedBuffer. Failed to invalidate.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ return Error::NONE;
+}
+
+Return<void> CrosGralloc4Mapper::isSupported(const BufferDescriptorInfo& descriptor,
+ isSupported_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to isSupported. Driver is uninitialized.\n");
+ hidlCb(Error::BAD_VALUE, false);
+ return Void();
+ }
+
+ struct cros_gralloc_buffer_descriptor crosDescriptor;
+ if (convertToCrosDescriptor(descriptor, &crosDescriptor)) {
+ hidlCb(Error::NONE, false);
+ return Void();
+ }
+
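+ // Try the full set of use flags first; if scanout is rejected, retry without it,
+ // since composition can fall back to the GPU.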
+ bool supported = mDriver->is_supported(&crosDescriptor);
+ if (!supported) {
+ crosDescriptor.use_flags &= ~BO_USE_SCANOUT;
+ supported = mDriver->is_supported(&crosDescriptor);
+ }
+
+ hidlCb(Error::NONE, supported);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::get(void* rawHandle, const MetadataType& metadataType,
+ get_cb hidlCb) {
+ hidl_vec<uint8_t> encodedMetadata;
+
+ if (!mDriver) {
+ drv_log("Failed to get. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, encodedMetadata);
+ return Void();
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to get. Empty handle.\n");
+ hidlCb(Error::BAD_BUFFER, encodedMetadata);
+ return Void();
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+ if (!crosHandle) {
+ drv_log("Failed to get. Invalid handle.\n");
+ hidlCb(Error::BAD_BUFFER, encodedMetadata);
+ return Void();
+ }
+
+ get(crosHandle, metadataType, hidlCb);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::get(cros_gralloc_handle_t crosHandle,
+ const MetadataType& metadataType, get_cb hidlCb) {
+ hidl_vec<uint8_t> encodedMetadata;
+
+ if (!mDriver) {
+ drv_log("Failed to get. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, encodedMetadata);
+ return Void();
+ }
+
+ if (!crosHandle) {
+ drv_log("Failed to get. Invalid handle.\n");
+ hidlCb(Error::BAD_BUFFER, encodedMetadata);
+ return Void();
+ }
+
+ android::status_t status = android::NO_ERROR;
+ if (metadataType == android::gralloc4::MetadataType_BufferId) {
+ status = android::gralloc4::encodeBufferId(crosHandle->id, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Name) {
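+ // The buffer name is stored as a nul-terminated string in the handle's data array.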
+ const char* name = (const char*)(&crosHandle->base.data[crosHandle->name_offset]);
+ status = android::gralloc4::encodeName(name, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Width) {
+ status = android::gralloc4::encodeWidth(crosHandle->width, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Height) {
+ status = android::gralloc4::encodeHeight(crosHandle->height, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_LayerCount) {
+ status = android::gralloc4::encodeLayerCount(1, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_PixelFormatRequested) {
+ PixelFormat pixelFormat = static_cast<PixelFormat>(crosHandle->droid_format);
+ status = android::gralloc4::encodePixelFormatRequested(pixelFormat, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_PixelFormatFourCC) {
+ uint32_t format = crosHandle->format;
+ // Map internal fourcc codes back to standard fourcc codes.
+ if (format == DRM_FORMAT_YVU420_ANDROID) {
+ format = DRM_FORMAT_YVU420;
+ }
+ status = android::gralloc4::encodePixelFormatFourCC(format, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_PixelFormatModifier) {
+ status = android::gralloc4::encodePixelFormatModifier(crosHandle->format_modifier,
+ &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Usage) {
+ uint64_t usage = static_cast<uint64_t>(crosHandle->usage);
+ status = android::gralloc4::encodeUsage(usage, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_AllocationSize) {
+ status = android::gralloc4::encodeAllocationSize(crosHandle->total_size, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_ProtectedContent) {
+ uint64_t hasProtectedContent = crosHandle->usage & BufferUsage::PROTECTED ? 1 : 0;
+ status = android::gralloc4::encodeProtectedContent(hasProtectedContent, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Compression) {
+ status = android::gralloc4::encodeCompression(android::gralloc4::Compression_None,
+ &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Interlaced) {
+ status = android::gralloc4::encodeInterlaced(android::gralloc4::Interlaced_None,
+ &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_ChromaSiting) {
+ status = android::gralloc4::encodeChromaSiting(android::gralloc4::ChromaSiting_None,
+ &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_PlaneLayouts) {
+ std::vector<PlaneLayout> planeLayouts;
+ getPlaneLayouts(crosHandle->format, &planeLayouts);
+
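+ // Start from the per-format layout template and fill in this buffer's actual offsets and strides.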
+ for (size_t plane = 0; plane < planeLayouts.size(); plane++) {
+ PlaneLayout& planeLayout = planeLayouts[plane];
+ planeLayout.offsetInBytes = crosHandle->offsets[plane];
+ planeLayout.strideInBytes = crosHandle->strides[plane];
+ planeLayout.totalSizeInBytes = crosHandle->sizes[plane];
+ planeLayout.widthInSamples = crosHandle->width / planeLayout.horizontalSubsampling;
+ planeLayout.heightInSamples = crosHandle->height / planeLayout.verticalSubsampling;
+ }
+
+ status = android::gralloc4::encodePlaneLayouts(planeLayouts, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Crop) {
+ std::vector<aidl::android::hardware::graphics::common::Rect> crops;
+ for (size_t plane = 0; plane < crosHandle->num_planes; plane++) {
+ aidl::android::hardware::graphics::common::Rect crop;
+ crop.left = 0;
+ crop.top = 0;
+ crop.right = crosHandle->width;
+ crop.bottom = crosHandle->height;
+ crops.push_back(crop);
+ }
+
+ status = android::gralloc4::encodeCrop(crops, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Dataspace) {
+ status = android::gralloc4::encodeDataspace(Dataspace::UNKNOWN, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_BlendMode) {
+ status = android::gralloc4::encodeBlendMode(BlendMode::INVALID, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Smpte2086) {
+ status = android::gralloc4::encodeSmpte2086(std::nullopt, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Cta861_3) {
+ status = android::gralloc4::encodeCta861_3(std::nullopt, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Smpte2094_40) {
+ status = android::gralloc4::encodeSmpte2094_40(std::nullopt, &encodedMetadata);
+ } else {
+ hidlCb(Error::UNSUPPORTED, encodedMetadata);
+ return Void();
+ }
+
+ if (status != android::NO_ERROR) {
+ hidlCb(Error::NO_RESOURCES, encodedMetadata);
+ drv_log("Failed to get. Failed to encode metadata.\n");
+ return Void();
+ }
+
+ hidlCb(Error::NONE, encodedMetadata);
+ return Void();
+}
+
+Return<Error> CrosGralloc4Mapper::set(void* rawHandle, const MetadataType& metadataType,
+ const hidl_vec<uint8_t>& /*metadata*/) {
+ if (!mDriver) {
+ drv_log("Failed to set. Driver is uninitialized.\n");
+ return Error::NO_RESOURCES;
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to set. Empty handle.\n");
+ return Error::BAD_BUFFER;
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+ if (!crosHandle) {
+ drv_log("Failed to set. Invalid handle.\n");
+ return Error::BAD_BUFFER;
+ }
+
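+ // These properties are fixed at allocation time; attempts to set them are rejected with BAD_VALUE.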
+ if (metadataType == android::gralloc4::MetadataType_BufferId) {
+ return Error::BAD_VALUE;
+ } else if (metadataType == android::gralloc4::MetadataType_Name) {
+ return Error::BAD_VALUE;
+ } else if (metadataType == android::gralloc4::MetadataType_Width) {
+ return Error::BAD_VALUE;
+ } else if (metadataType == android::gralloc4::MetadataType_Height) {
+ return Error::BAD_VALUE;
+ } else if (metadataType == android::gralloc4::MetadataType_LayerCount) {
+ return Error::BAD_VALUE;
+ } else if (metadataType == android::gralloc4::MetadataType_PixelFormatRequested) {
+ return Error::BAD_VALUE;
+ } else if (metadataType == android::gralloc4::MetadataType_Usage) {
+ return Error::BAD_VALUE;
+ }
+
+ return Error::UNSUPPORTED;
+}
+
+int CrosGralloc4Mapper::getResolvedDrmFormat(PixelFormat pixelFormat, uint64_t bufferUsage,
+ uint32_t* outDrmFormat) {
+ uint32_t drmFormat;
+ if (convertToDrmFormat(pixelFormat, &drmFormat)) {
+ std::string pixelFormatString = getPixelFormatString(pixelFormat);
+ drv_log("Failed to getResolvedDrmFormat. Failed to convert format %s\n",
+ pixelFormatString.c_str());
+ return -1;
+ }
+
+ uint64_t usage;
+ if (convertToBufferUsage(bufferUsage, &usage)) {
+ std::string usageString = getUsageString(bufferUsage);
+ drv_log("Failed to getResolvedDrmFormat. Failed to convert usage %s\n",
+ usageString.c_str());
+ return -1;
+ }
+
+ uint32_t resolvedDrmFormat = mDriver->get_resolved_drm_format(drmFormat, usage);
+ if (resolvedDrmFormat == DRM_FORMAT_INVALID) {
+ std::string drmFormatString = get_drm_format_string(drmFormat);
+ drv_log("Failed to getResolvedDrmFormat. Failed to resolve drm format %s\n",
+ drmFormatString.c_str());
+ return -1;
+ }
+
+ *outDrmFormat = resolvedDrmFormat;
+
+ return 0;
+}
+
+Return<void> CrosGralloc4Mapper::getFromBufferDescriptorInfo(
+ const BufferDescriptorInfo& descriptor, const MetadataType& metadataType,
+ getFromBufferDescriptorInfo_cb hidlCb) {
+ hidl_vec<uint8_t> encodedMetadata;
+
+ if (!mDriver) {
+ drv_log("Failed to getFromBufferDescriptorInfo. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, encodedMetadata);
+ return Void();
+ }
+
+ android::status_t status = android::NO_ERROR;
+ if (metadataType == android::gralloc4::MetadataType_Name) {
+ status = android::gralloc4::encodeName(descriptor.name, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Width) {
+ status = android::gralloc4::encodeWidth(descriptor.width, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Height) {
+ status = android::gralloc4::encodeHeight(descriptor.height, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_LayerCount) {
+ status = android::gralloc4::encodeLayerCount(1, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_PixelFormatRequested) {
+ status = android::gralloc4::encodePixelFormatRequested(descriptor.format, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_PixelFormatFourCC) {
+ uint32_t drmFormat;
+ if (getResolvedDrmFormat(descriptor.format, descriptor.usage, &drmFormat)) {
+ hidlCb(Error::BAD_VALUE, encodedMetadata);
+ return Void();
+ }
+ status = android::gralloc4::encodePixelFormatFourCC(drmFormat, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Usage) {
+ status = android::gralloc4::encodeUsage(descriptor.usage, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_ProtectedContent) {
+ uint64_t hasProtectedContent = descriptor.usage & BufferUsage::PROTECTED ? 1 : 0;
+ status = android::gralloc4::encodeProtectedContent(hasProtectedContent, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Compression) {
+ status = android::gralloc4::encodeCompression(android::gralloc4::Compression_None,
+ &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Interlaced) {
+ status = android::gralloc4::encodeInterlaced(android::gralloc4::Interlaced_None,
+ &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_ChromaSiting) {
+ status = android::gralloc4::encodeChromaSiting(android::gralloc4::ChromaSiting_None,
+ &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Crop) {
+ uint32_t drmFormat;
+ if (getResolvedDrmFormat(descriptor.format, descriptor.usage, &drmFormat)) {
+ hidlCb(Error::BAD_VALUE, encodedMetadata);
+ return Void();
+ }
+
+ size_t numPlanes = drv_num_planes_from_format(drmFormat);
+
+ std::vector<aidl::android::hardware::graphics::common::Rect> crops;
+ for (size_t plane = 0; plane < numPlanes; plane++) {
+ aidl::android::hardware::graphics::common::Rect crop;
+ crop.left = 0;
+ crop.top = 0;
+ crop.right = descriptor.width;
+ crop.bottom = descriptor.height;
+ crops.push_back(crop);
+ }
+ status = android::gralloc4::encodeCrop(crops, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Dataspace) {
+ status = android::gralloc4::encodeDataspace(Dataspace::UNKNOWN, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_BlendMode) {
+ status = android::gralloc4::encodeBlendMode(BlendMode::INVALID, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Smpte2086) {
+ status = android::gralloc4::encodeSmpte2086(std::nullopt, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Cta861_3) {
+ status = android::gralloc4::encodeCta861_3(std::nullopt, &encodedMetadata);
+ } else if (metadataType == android::gralloc4::MetadataType_Smpte2094_40) {
+ status = android::gralloc4::encodeSmpte2094_40(std::nullopt, &encodedMetadata);
+ } else {
+ hidlCb(Error::UNSUPPORTED, encodedMetadata);
+ return Void();
+ }
+
+ if (status != android::NO_ERROR) {
+ hidlCb(Error::NO_RESOURCES, encodedMetadata);
+ return Void();
+ }
+
+ hidlCb(Error::NONE, encodedMetadata);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::listSupportedMetadataTypes(listSupportedMetadataTypes_cb hidlCb) {
+ hidl_vec<MetadataTypeDescription> supported;
+
+ if (!mDriver) {
+ drv_log("Failed to listSupportedMetadataTypes. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, supported);
+ return Void();
+ }
+
+ supported = hidl_vec<IMapper::MetadataTypeDescription>({
+ {
+ android::gralloc4::MetadataType_BufferId,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Name,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Width,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Height,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_LayerCount,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_PixelFormatRequested,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_PixelFormatFourCC,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_PixelFormatModifier,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Usage,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_AllocationSize,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_ProtectedContent,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Compression,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Interlaced,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_ChromaSiting,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_PlaneLayouts,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Dataspace,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_BlendMode,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Smpte2086,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Cta861_3,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ {
+ android::gralloc4::MetadataType_Smpte2094_40,
+ "",
+ /*isGettable=*/true,
+ /*isSettable=*/false,
+ },
+ });
+
+ hidlCb(Error::NONE, supported);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::dumpBuffer(void* rawHandle, dumpBuffer_cb hidlCb) {
+ BufferDump bufferDump;
+
+ if (!mDriver) {
+ drv_log("Failed to dumpBuffer. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, bufferDump);
+ return Void();
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to dumpBuffer. Empty handle.\n");
+ hidlCb(Error::BAD_BUFFER, bufferDump);
+ return Void();
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+ if (!crosHandle) {
+ drv_log("Failed to dumpBuffer. Invalid handle.\n");
+ hidlCb(Error::BAD_BUFFER, bufferDump);
+ return Void();
+ }
+
+ return dumpBuffer(crosHandle, hidlCb);
+}
+
+Return<void> CrosGralloc4Mapper::dumpBuffer(cros_gralloc_handle_t crosHandle,
+ dumpBuffer_cb hidlCb) {
+ BufferDump bufferDump;
+
+ if (!mDriver) {
+ drv_log("Failed to dumpBuffer. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, bufferDump);
+ return Void();
+ }
+
+ if (!crosHandle) {
+ drv_log("Failed to dumpBuffer. Invalid handle.\n");
+ hidlCb(Error::BAD_BUFFER, bufferDump);
+ return Void();
+ }
+
+ std::vector<MetadataDump> metadataDumps;
+
+ MetadataType metadataType = android::gralloc4::MetadataType_BufferId;
+ auto metadata_get_callback = [&](Error, hidl_vec<uint8_t> metadata) {
+ MetadataDump metadataDump;
+ metadataDump.metadataType = metadataType;
+ metadataDump.metadata = metadata;
+ metadataDumps.push_back(metadataDump);
+ };
+
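+ // Query each gettable metadata type in turn; the callback records one dump per type.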
+ metadataType = android::gralloc4::MetadataType_BufferId;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_Name;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_Width;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_Height;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_LayerCount;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_PixelFormatRequested;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_PixelFormatFourCC;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_PixelFormatModifier;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_Usage;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_AllocationSize;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_ProtectedContent;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_Compression;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_Interlaced;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_ChromaSiting;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_PlaneLayouts;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_Dataspace;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ metadataType = android::gralloc4::MetadataType_BlendMode;
+ get(crosHandle, metadataType, metadata_get_callback);
+
+ bufferDump.metadataDump = metadataDumps;
+ hidlCb(Error::NONE, bufferDump);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::dumpBuffers(dumpBuffers_cb hidlCb) {
+ std::vector<BufferDump> bufferDumps;
+
+ if (!mDriver) {
+ drv_log("Failed to dumpBuffers. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, bufferDumps);
+ return Void();
+ }
+
+ Error error = Error::NONE;
+
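+ // Dump every buffer handle the driver currently tracks.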
+ auto handleCallback = [&](cros_gralloc_handle_t crosHandle) {
+ auto dumpBufferCallback = [&](Error err, BufferDump bufferDump) {
+ error = err;
+ if (error == Error::NONE) {
+ bufferDumps.push_back(bufferDump);
+ }
+ };
+
+ dumpBuffer(crosHandle, dumpBufferCallback);
+ };
+ mDriver->for_each_handle(handleCallback);
+
+ hidlCb(error, bufferDumps);
+ return Void();
+}
+
+Return<void> CrosGralloc4Mapper::getReservedRegion(void* rawHandle, getReservedRegion_cb hidlCb) {
+ if (!mDriver) {
+ drv_log("Failed to getReservedRegion. Driver is uninitialized.\n");
+ hidlCb(Error::NO_RESOURCES, nullptr, 0);
+ return Void();
+ }
+
+ buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+ if (!bufferHandle) {
+ drv_log("Failed to getReservedRegion. Empty handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr, 0);
+ return Void();
+ }
+
+ cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+ if (!crosHandle) {
+ drv_log("Failed to getReservedRegion. Invalid handle.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr, 0);
+ return Void();
+ }
+
+ void* reservedRegionAddr = nullptr;
+ uint64_t reservedRegionSize = 0;
+ int ret = mDriver->get_reserved_region(bufferHandle, &reservedRegionAddr, &reservedRegionSize);
+ if (ret) {
+ drv_log("Failed to getReservedRegion.\n");
+ hidlCb(Error::BAD_BUFFER, nullptr, 0);
+ return Void();
+ }
+
+ hidlCb(Error::NONE, reservedRegionAddr, reservedRegionSize);
+ return Void();
+}
+
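+ // Entry point used by the HIDL passthrough loader to instantiate the mapper.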
+android::hardware::graphics::mapper::V4_0::IMapper* HIDL_FETCH_IMapper(const char* /*name*/) {
+ return static_cast<android::hardware::graphics::mapper::V4_0::IMapper*>(new CrosGralloc4Mapper);
+}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Mapper.h b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Mapper.h
new file mode 100644
index 00000000000..b3189302647
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Mapper.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <android/hardware/graphics/mapper/4.0/IMapper.h>
+
+#include "cros_gralloc/cros_gralloc_driver.h"
+#include "cros_gralloc/cros_gralloc_handle.h"
+
+class CrosGralloc4Mapper : public android::hardware::graphics::mapper::V4_0::IMapper {
+ public:
+ CrosGralloc4Mapper();
+
+ android::hardware::Return<void> createDescriptor(const BufferDescriptorInfo& description,
+ createDescriptor_cb hidlCb) override;
+
+ android::hardware::Return<void> importBuffer(const android::hardware::hidl_handle& rawHandle,
+ importBuffer_cb hidlCb) override;
+
+ android::hardware::Return<android::hardware::graphics::mapper::V4_0::Error> freeBuffer(
+ void* rawHandle) override;
+
+ android::hardware::Return<android::hardware::graphics::mapper::V4_0::Error> validateBufferSize(
+ void* rawHandle, const BufferDescriptorInfo& descriptor, uint32_t stride) override;
+
+ android::hardware::Return<void> getTransportSize(void* rawHandle,
+ getTransportSize_cb hidlCb) override;
+
+ android::hardware::Return<void> lock(void* rawHandle, uint64_t cpuUsage,
+ const Rect& accessRegion,
+ const android::hardware::hidl_handle& acquireFence,
+ lock_cb hidlCb) override;
+
+ android::hardware::Return<void> unlock(void* rawHandle, unlock_cb hidlCb) override;
+
+ android::hardware::Return<void> flushLockedBuffer(void* rawHandle,
+ flushLockedBuffer_cb hidlCb) override;
+
+ android::hardware::Return<android::hardware::graphics::mapper::V4_0::Error> rereadLockedBuffer(
+ void* rawHandle) override;
+
+ android::hardware::Return<void> isSupported(const BufferDescriptorInfo& descriptor,
+ isSupported_cb hidlCb) override;
+
+ android::hardware::Return<void> get(void* rawHandle, const MetadataType& metadataType,
+ get_cb hidlCb) override;
+
+ android::hardware::Return<android::hardware::graphics::mapper::V4_0::Error> set(
+ void* rawHandle, const MetadataType& metadataType,
+ const android::hardware::hidl_vec<uint8_t>& metadata) override;
+
+ android::hardware::Return<void> getFromBufferDescriptorInfo(
+ const BufferDescriptorInfo& descriptor, const MetadataType& metadataType,
+ getFromBufferDescriptorInfo_cb hidlCb) override;
+
+ android::hardware::Return<void> listSupportedMetadataTypes(
+ listSupportedMetadataTypes_cb hidlCb) override;
+
+ android::hardware::Return<void> dumpBuffer(void* rawHandle, dumpBuffer_cb hidlCb) override;
+ android::hardware::Return<void> dumpBuffers(dumpBuffers_cb hidlCb) override;
+
+ android::hardware::Return<void> getReservedRegion(void* rawHandle,
+ getReservedRegion_cb hidlCb) override;
+
+ private:
+ android::hardware::Return<void> get(cros_gralloc_handle_t crosHandle,
+ const MetadataType& metadataType, get_cb hidlCb);
+
+ android::hardware::Return<void> dumpBuffer(cros_gralloc_handle_t crosHandle,
+ dumpBuffer_cb hidlCb);
+
+ int getResolvedDrmFormat(android::hardware::graphics::common::V1_2::PixelFormat pixelFormat,
+ uint64_t bufferUsage, uint32_t* outDrmFormat);
+
+ std::unique_ptr<cros_gralloc_driver> mDriver;
+};
+
+extern "C" android::hardware::graphics::mapper::V4_0::IMapper* HIDL_FETCH_IMapper(const char* name);
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Utils.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Utils.cc
new file mode 100644
index 00000000000..5e37b7e127d
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Utils.cc
@@ -0,0 +1,671 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc/gralloc4/CrosGralloc4Utils.h"
+
+#include <array>
+#include <unordered_map>
+
+#include <aidl/android/hardware/graphics/common/PlaneLayoutComponent.h>
+#include <aidl/android/hardware/graphics/common/PlaneLayoutComponentType.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+#include <cutils/native_handle.h>
+#include <gralloctypes/Gralloc4.h>
+
+#include "cros_gralloc/cros_gralloc_helpers.h"
+
+using aidl::android::hardware::graphics::common::PlaneLayout;
+using aidl::android::hardware::graphics::common::PlaneLayoutComponent;
+using aidl::android::hardware::graphics::common::PlaneLayoutComponentType;
+using android::hardware::hidl_bitfield;
+using android::hardware::hidl_handle;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::common::V1_2::PixelFormat;
+
+using BufferDescriptorInfo =
+ android::hardware::graphics::mapper::V4_0::IMapper::BufferDescriptorInfo;
+
+std::string getPixelFormatString(PixelFormat format) {
+ switch (format) {
+ case PixelFormat::BGRA_8888:
+ return "PixelFormat::BGRA_8888";
+ case PixelFormat::BLOB:
+ return "PixelFormat::BLOB";
+ case PixelFormat::DEPTH_16:
+ return "PixelFormat::DEPTH_16";
+ case PixelFormat::DEPTH_24:
+ return "PixelFormat::DEPTH_24";
+ case PixelFormat::DEPTH_24_STENCIL_8:
+ return "PixelFormat::DEPTH_24_STENCIL_8";
+ case PixelFormat::DEPTH_32F:
+ return "PixelFormat::DEPTH_32F";
+ case PixelFormat::DEPTH_32F_STENCIL_8:
+ return "PixelFormat::DEPTH_32F_STENCIL_8";
+ case PixelFormat::HSV_888:
+ return "PixelFormat::HSV_888";
+ case PixelFormat::IMPLEMENTATION_DEFINED:
+ return "PixelFormat::IMPLEMENTATION_DEFINED";
+ case PixelFormat::RAW10:
+ return "PixelFormat::RAW10";
+ case PixelFormat::RAW12:
+ return "PixelFormat::RAW12";
+ case PixelFormat::RAW16:
+ return "PixelFormat::RAW16";
+ case PixelFormat::RAW_OPAQUE:
+ return "PixelFormat::RAW_OPAQUE";
+ case PixelFormat::RGBA_1010102:
+ return "PixelFormat::RGBA_1010102";
+ case PixelFormat::RGBA_8888:
+ return "PixelFormat::RGBA_8888";
+ case PixelFormat::RGBA_FP16:
+ return "PixelFormat::RGBA_FP16";
+ case PixelFormat::RGBX_8888:
+ return "PixelFormat::RGBX_8888";
+ case PixelFormat::RGB_565:
+ return "PixelFormat::RGB_565";
+ case PixelFormat::RGB_888:
+ return "PixelFormat::RGB_888";
+ case PixelFormat::STENCIL_8:
+ return "PixelFormat::STENCIL_8";
+ case PixelFormat::Y16:
+ return "PixelFormat::Y16";
+ case PixelFormat::Y8:
+ return "PixelFormat::Y8";
+ case PixelFormat::YCBCR_420_888:
+ return "PixelFormat::YCBCR_420_888";
+ case PixelFormat::YCBCR_422_I:
+ return "PixelFormat::YCBCR_422_I";
+ case PixelFormat::YCBCR_422_SP:
+ return "PixelFormat::YCBCR_422_SP";
+ case PixelFormat::YCBCR_P010:
+ return "PixelFormat::YCBCR_P010";
+ case PixelFormat::YCRCB_420_SP:
+ return "PixelFormat::YCRCB_420_SP";
+ case PixelFormat::YV12:
+ return "PixelFormat::YV12";
+ }
+ return android::base::StringPrintf("PixelFormat::Unknown(%u)", static_cast<uint32_t>(format));
+}
+
+std::string getUsageString(hidl_bitfield<BufferUsage> bufferUsage) {
+ using Underlying = typename std::underlying_type<BufferUsage>::type;
+
+ Underlying usage = static_cast<Underlying>(bufferUsage);
+
+ std::vector<std::string> usages;
+ if (usage & BufferUsage::CAMERA_INPUT) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CAMERA_INPUT);
+ usages.push_back("BufferUsage::CAMERA_INPUT");
+ }
+ if (usage & BufferUsage::CAMERA_OUTPUT) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CAMERA_OUTPUT);
+ usages.push_back("BufferUsage::CAMERA_OUTPUT");
+ }
+ if (usage & BufferUsage::COMPOSER_CURSOR) {
+ usage &= ~static_cast<Underlying>(BufferUsage::COMPOSER_CURSOR);
+ usages.push_back("BufferUsage::COMPOSER_CURSOR");
+ }
+ if (usage & BufferUsage::COMPOSER_OVERLAY) {
+ usage &= ~static_cast<Underlying>(BufferUsage::COMPOSER_OVERLAY);
+ usages.push_back("BufferUsage::COMPOSER_OVERLAY");
+ }
+ if (usage & BufferUsage::CPU_READ_OFTEN) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_READ_OFTEN);
+ usages.push_back("BufferUsage::CPU_READ_OFTEN");
+ }
+ if (usage & BufferUsage::CPU_READ_NEVER) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_READ_NEVER);
+ usages.push_back("BufferUsage::CPU_READ_NEVER");
+ }
+ if (usage & BufferUsage::CPU_READ_RARELY) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_READ_RARELY);
+ usages.push_back("BufferUsage::CPU_READ_RARELY");
+ }
+ if (usage & BufferUsage::CPU_WRITE_NEVER) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_WRITE_NEVER);
+ usages.push_back("BufferUsage::CPU_WRITE_NEVER");
+ }
+ if (usage & BufferUsage::CPU_WRITE_OFTEN) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_WRITE_OFTEN);
+ usages.push_back("BufferUsage::CPU_WRITE_OFTEN");
+ }
+ if (usage & BufferUsage::CPU_WRITE_RARELY) {
+ usage &= ~static_cast<Underlying>(BufferUsage::CPU_WRITE_RARELY);
+ usages.push_back("BufferUsage::CPU_WRITE_RARELY");
+ }
+ if (usage & BufferUsage::GPU_RENDER_TARGET) {
+ usage &= ~static_cast<Underlying>(BufferUsage::GPU_RENDER_TARGET);
+ usages.push_back("BufferUsage::GPU_RENDER_TARGET");
+ }
+ if (usage & BufferUsage::GPU_TEXTURE) {
+ usage &= ~static_cast<Underlying>(BufferUsage::GPU_TEXTURE);
+ usages.push_back("BufferUsage::GPU_TEXTURE");
+ }
+ if (usage & BufferUsage::PROTECTED) {
+ usage &= ~static_cast<Underlying>(BufferUsage::PROTECTED);
+ usages.push_back("BufferUsage::PROTECTED");
+ }
+ if (usage & BufferUsage::RENDERSCRIPT) {
+ usage &= ~static_cast<Underlying>(BufferUsage::RENDERSCRIPT);
+ usages.push_back("BufferUsage::RENDERSCRIPT");
+ }
+ if (usage & BufferUsage::VIDEO_DECODER) {
+ usage &= ~static_cast<Underlying>(BufferUsage::VIDEO_DECODER);
+ usages.push_back("BufferUsage::VIDEO_DECODER");
+ }
+ if (usage & BufferUsage::VIDEO_ENCODER) {
+ usage &= ~static_cast<Underlying>(BufferUsage::VIDEO_ENCODER);
+ usages.push_back("BufferUsage::VIDEO_ENCODER");
+ }
+
+ if (usage) {
+ usages.push_back(android::base::StringPrintf("UnknownUsageBits-%" PRIu64, usage));
+ }
+
+ return android::base::Join(usages, '|');
+}
+
+int convertToDrmFormat(PixelFormat format, uint32_t* outDrmFormat) {
+ switch (format) {
+ case PixelFormat::BGRA_8888:
+ *outDrmFormat = DRM_FORMAT_ARGB8888;
+ return 0;
+ /**
+ * Choose DRM_FORMAT_R8 because <system/graphics.h> requires the buffers
+ * with a format HAL_PIXEL_FORMAT_BLOB have a height of 1, and width
+ * equal to their size in bytes.
+ */
+ case PixelFormat::BLOB:
+ *outDrmFormat = DRM_FORMAT_R8;
+ return 0;
+ case PixelFormat::DEPTH_16:
+ return -EINVAL;
+ case PixelFormat::DEPTH_24:
+ return -EINVAL;
+ case PixelFormat::DEPTH_24_STENCIL_8:
+ return -EINVAL;
+ case PixelFormat::DEPTH_32F:
+ return -EINVAL;
+ case PixelFormat::DEPTH_32F_STENCIL_8:
+ return -EINVAL;
+ case PixelFormat::HSV_888:
+ return -EINVAL;
+ case PixelFormat::IMPLEMENTATION_DEFINED:
+ *outDrmFormat = DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED;
+ return 0;
+ case PixelFormat::RAW10:
+ return -EINVAL;
+ case PixelFormat::RAW12:
+ return -EINVAL;
+ case PixelFormat::RAW16:
+ *outDrmFormat = DRM_FORMAT_R16;
+ return 0;
+ /* TODO use blob */
+ case PixelFormat::RAW_OPAQUE:
+ return -EINVAL;
+ case PixelFormat::RGBA_1010102:
+ *outDrmFormat = DRM_FORMAT_ABGR2101010;
+ return 0;
+ case PixelFormat::RGBA_8888:
+ *outDrmFormat = DRM_FORMAT_ABGR8888;
+ return 0;
+ case PixelFormat::RGBA_FP16:
+ *outDrmFormat = DRM_FORMAT_ABGR16161616F;
+ return 0;
+ case PixelFormat::RGBX_8888:
+ *outDrmFormat = DRM_FORMAT_XBGR8888;
+ return 0;
+ case PixelFormat::RGB_565:
+ *outDrmFormat = DRM_FORMAT_RGB565;
+ return 0;
+ case PixelFormat::RGB_888:
+ *outDrmFormat = DRM_FORMAT_RGB888;
+ return 0;
+ case PixelFormat::STENCIL_8:
+ return -EINVAL;
+ case PixelFormat::Y16:
+ *outDrmFormat = DRM_FORMAT_R16;
+ return 0;
+ case PixelFormat::Y8:
+ *outDrmFormat = DRM_FORMAT_R8;
+ return 0;
+ case PixelFormat::YCBCR_420_888:
+ *outDrmFormat = DRM_FORMAT_FLEX_YCbCr_420_888;
+ return 0;
+ case PixelFormat::YCBCR_422_SP:
+ return -EINVAL;
+ case PixelFormat::YCBCR_422_I:
+ return -EINVAL;
+ case PixelFormat::YCBCR_P010:
+ *outDrmFormat = DRM_FORMAT_P010;
+ return 0;
+ case PixelFormat::YCRCB_420_SP:
+ *outDrmFormat = DRM_FORMAT_NV21;
+ return 0;
+ case PixelFormat::YV12:
+ *outDrmFormat = DRM_FORMAT_YVU420_ANDROID;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+int convertToBufferUsage(uint64_t grallocUsage, uint64_t* outBufferUsage) {
+ uint64_t bufferUsage = BO_USE_NONE;
+
+ if ((grallocUsage & BufferUsage::CPU_READ_MASK) ==
+ static_cast<uint64_t>(BufferUsage::CPU_READ_RARELY)) {
+ bufferUsage |= BO_USE_SW_READ_RARELY;
+ }
+ if ((grallocUsage & BufferUsage::CPU_READ_MASK) ==
+ static_cast<uint64_t>(BufferUsage::CPU_READ_OFTEN)) {
+ bufferUsage |= BO_USE_SW_READ_OFTEN;
+ }
+ if ((grallocUsage & BufferUsage::CPU_WRITE_MASK) ==
+ static_cast<uint64_t>(BufferUsage::CPU_WRITE_RARELY)) {
+ bufferUsage |= BO_USE_SW_WRITE_RARELY;
+ }
+ if ((grallocUsage & BufferUsage::CPU_WRITE_MASK) ==
+ static_cast<uint64_t>(BufferUsage::CPU_WRITE_OFTEN)) {
+ bufferUsage |= BO_USE_SW_WRITE_OFTEN;
+ }
+ if (grallocUsage & BufferUsage::GPU_TEXTURE) {
+ bufferUsage |= BO_USE_TEXTURE;
+ }
+ if (grallocUsage & BufferUsage::GPU_RENDER_TARGET) {
+ bufferUsage |= BO_USE_RENDERING;
+ }
+ if (grallocUsage & BufferUsage::COMPOSER_OVERLAY) {
+ /* HWC wants to use display hardware, but can defer to OpenGL. */
+ bufferUsage |= BO_USE_SCANOUT | BO_USE_TEXTURE;
+ }
+ /* Map this flag to linear until real HW protection is available on Android. */
+ if (grallocUsage & BufferUsage::PROTECTED) {
+ bufferUsage |= BO_USE_LINEAR;
+ }
+ if (grallocUsage & BufferUsage::COMPOSER_CURSOR) {
+ bufferUsage |= BO_USE_NONE;
+ }
+ if (grallocUsage & BufferUsage::VIDEO_ENCODER) {
+ /*HACK: See b/30054495 */
+ bufferUsage |= BO_USE_SW_READ_OFTEN;
+ }
+ if (grallocUsage & BufferUsage::CAMERA_OUTPUT) {
+ bufferUsage |= BO_USE_CAMERA_WRITE;
+ }
+ if (grallocUsage & BufferUsage::CAMERA_INPUT) {
+ bufferUsage |= BO_USE_CAMERA_READ;
+ }
+ if (grallocUsage & BufferUsage::RENDERSCRIPT) {
+ bufferUsage |= BO_USE_RENDERSCRIPT;
+ }
+ if (grallocUsage & BufferUsage::VIDEO_DECODER) {
+ bufferUsage |= BO_USE_HW_VIDEO_DECODER;
+ }
+
+ *outBufferUsage = bufferUsage;
+ return 0;
+}
+
+int convertToCrosDescriptor(const BufferDescriptorInfo& descriptor,
+ struct cros_gralloc_buffer_descriptor* outCrosDescriptor) {
+ outCrosDescriptor->name = descriptor.name;
+ outCrosDescriptor->width = descriptor.width;
+ outCrosDescriptor->height = descriptor.height;
+ outCrosDescriptor->droid_format = static_cast<int32_t>(descriptor.format);
+ outCrosDescriptor->droid_usage = descriptor.usage;
+ outCrosDescriptor->reserved_region_size = descriptor.reservedSize;
+ if (descriptor.layerCount > 1) {
+ drv_log("Failed to convert descriptor. Unsupported layerCount: %d\n",
+ descriptor.layerCount);
+ return -1;
+ }
+ if (convertToDrmFormat(descriptor.format, &outCrosDescriptor->drm_format)) {
+ std::string pixelFormatString = getPixelFormatString(descriptor.format);
+ drv_log("Failed to convert descriptor. Unsupported format %s\n", pixelFormatString.c_str());
+ return -1;
+ }
+ if (convertToBufferUsage(descriptor.usage, &outCrosDescriptor->use_flags)) {
+ std::string usageString = getUsageString(descriptor.usage);
+ drv_log("Failed to convert descriptor. Unsupported usage flags %s\n", usageString.c_str());
+ return -1;
+ }
+ return 0;
+}
+
+int convertToMapUsage(uint64_t grallocUsage, uint32_t* outMapUsage) {
+ uint32_t mapUsage = BO_MAP_NONE;
+
+ if (grallocUsage & BufferUsage::CPU_READ_MASK) {
+ mapUsage |= BO_MAP_READ;
+ }
+ if (grallocUsage & BufferUsage::CPU_WRITE_MASK) {
+ mapUsage |= BO_MAP_WRITE;
+ }
+
+ *outMapUsage = mapUsage;
+ return 0;
+}
+
+int convertToFenceFd(const hidl_handle& fenceHandle, int* outFenceFd) {
+ if (!outFenceFd) {
+ return -EINVAL;
+ }
+
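+ /* A sync fence handle carries at most one fd; an empty handle means no fence. */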
+ const native_handle_t* nativeHandle = fenceHandle.getNativeHandle();
+ if (nativeHandle && nativeHandle->numFds > 1) {
+ return -EINVAL;
+ }
+
+ *outFenceFd = (nativeHandle && nativeHandle->numFds == 1) ? nativeHandle->data[0] : -1;
+ return 0;
+}
+
+int convertToFenceHandle(int fenceFd, hidl_handle* outFenceHandle) {
+ if (!outFenceHandle) {
+ return -EINVAL;
+ }
+ if (fenceFd < 0) {
+ return 0;
+ }
+
+ NATIVE_HANDLE_DECLARE_STORAGE(handleStorage, 1, 0);
+ auto fenceHandle = native_handle_init(handleStorage, 1, 0);
+ fenceHandle->data[0] = fenceFd;
+
+ *outFenceHandle = fenceHandle;
+ return 0;
+}
+
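+/* Static per-format layout templates; per-buffer offsets, strides and sizes are
+ * filled in by the mapper when PlaneLayouts metadata is queried. */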
+const std::unordered_map<uint32_t, std::vector<PlaneLayout>>& GetPlaneLayoutsMap() {
+ static const auto* kPlaneLayoutsMap =
+ new std::unordered_map<uint32_t, std::vector<PlaneLayout>>({
+ {DRM_FORMAT_ABGR8888,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R,
+ .offsetInBits = 0,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_G,
+ .offsetInBits = 8,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_B,
+ .offsetInBits = 16,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_A,
+ .offsetInBits = 24,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 32,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ }}},
+
+ {DRM_FORMAT_ABGR2101010,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R,
+ .offsetInBits = 0,
+ .sizeInBits = 10},
+ {.type = android::gralloc4::PlaneLayoutComponentType_G,
+ .offsetInBits = 10,
+ .sizeInBits = 10},
+ {.type = android::gralloc4::PlaneLayoutComponentType_B,
+ .offsetInBits = 20,
+ .sizeInBits = 10},
+ {.type = android::gralloc4::PlaneLayoutComponentType_A,
+ .offsetInBits = 30,
+ .sizeInBits = 2}},
+ .sampleIncrementInBits = 32,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ }}},
+
+ {DRM_FORMAT_ABGR16161616F,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R,
+ .offsetInBits = 0,
+ .sizeInBits = 16},
+ {.type = android::gralloc4::PlaneLayoutComponentType_G,
+ .offsetInBits = 16,
+ .sizeInBits = 16},
+ {.type = android::gralloc4::PlaneLayoutComponentType_B,
+ .offsetInBits = 32,
+ .sizeInBits = 16},
+ {.type = android::gralloc4::PlaneLayoutComponentType_A,
+ .offsetInBits = 48,
+ .sizeInBits = 16}},
+ .sampleIncrementInBits = 64,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ }}},
+
+ {DRM_FORMAT_ARGB8888,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_B,
+ .offsetInBits = 0,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_G,
+ .offsetInBits = 8,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_R,
+ .offsetInBits = 16,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_A,
+ .offsetInBits = 24,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 32,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ }}},
+
+ {DRM_FORMAT_NV12,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_Y,
+ .offsetInBits = 0,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 8,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ },
+ {
+ .components =
+ {{.type = android::gralloc4::PlaneLayoutComponentType_CB,
+ .offsetInBits = 0,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_CR,
+ .offsetInBits = 8,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 16,
+ .horizontalSubsampling = 2,
+ .verticalSubsampling = 2,
+ }}},
+
+ {DRM_FORMAT_NV21,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_Y,
+ .offsetInBits = 0,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 8,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ },
+ {
+ .components =
+ {{.type = android::gralloc4::PlaneLayoutComponentType_CR,
+ .offsetInBits = 0,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_CB,
+ .offsetInBits = 8,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 16,
+ .horizontalSubsampling = 2,
+ .verticalSubsampling = 2,
+ }}},
+
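+ /* P010: each 16-bit sample keeps its 10 significant bits in the upper bits, hence the 6-bit offsets. */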
+ {DRM_FORMAT_P010,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_Y,
+ .offsetInBits = 6,
+ .sizeInBits = 10}},
+ .sampleIncrementInBits = 16,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ },
+ {
+ .components =
+ {{.type = android::gralloc4::PlaneLayoutComponentType_CB,
+ .offsetInBits = 6,
+ .sizeInBits = 10},
+ {.type = android::gralloc4::PlaneLayoutComponentType_CR,
+ .offsetInBits = 22,
+ .sizeInBits = 10}},
+ .sampleIncrementInBits = 32,
+ .horizontalSubsampling = 2,
+ .verticalSubsampling = 2,
+ }}},
+
+ {DRM_FORMAT_R8,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R,
+ .offsetInBits = 0,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 8,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ }}},
+
+ {DRM_FORMAT_R16,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R,
+ .offsetInBits = 0,
+ .sizeInBits = 16}},
+ .sampleIncrementInBits = 16,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ }}},
+
+ {DRM_FORMAT_RGB565,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R,
+ .offsetInBits = 0,
+ .sizeInBits = 5},
+ {.type = android::gralloc4::PlaneLayoutComponentType_G,
+ .offsetInBits = 5,
+ .sizeInBits = 6},
+ {.type = android::gralloc4::PlaneLayoutComponentType_B,
+ .offsetInBits = 11,
+ .sizeInBits = 5}},
+ .sampleIncrementInBits = 16,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ }}},
+
+ {DRM_FORMAT_RGB888,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R,
+ .offsetInBits = 0,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_G,
+ .offsetInBits = 8,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_B,
+ .offsetInBits = 16,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 24,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ }}},
+
+ {DRM_FORMAT_XBGR8888,
+ {{
+ .components = {{.type = android::gralloc4::PlaneLayoutComponentType_B,
+ .offsetInBits = 0,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_G,
+ .offsetInBits = 8,
+ .sizeInBits = 8},
+ {.type = android::gralloc4::PlaneLayoutComponentType_R,
+ .offsetInBits = 16,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 32,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ }}},
+
+ {DRM_FORMAT_YVU420,
+ {
+ {
+ .components = {{.type = android::gralloc4::
+ PlaneLayoutComponentType_Y,
+ .offsetInBits = 0,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 8,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ },
+ {
+ .components = {{.type = android::gralloc4::
+ PlaneLayoutComponentType_CB,
+ .offsetInBits = 0,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 8,
+ .horizontalSubsampling = 2,
+ .verticalSubsampling = 2,
+ },
+ {
+ .components = {{.type = android::gralloc4::
+ PlaneLayoutComponentType_CR,
+ .offsetInBits = 0,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 8,
+ .horizontalSubsampling = 2,
+ .verticalSubsampling = 2,
+ },
+ }},
+
+ {DRM_FORMAT_YVU420_ANDROID,
+ {
+ {
+ .components = {{.type = android::gralloc4::
+ PlaneLayoutComponentType_Y,
+ .offsetInBits = 0,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 8,
+ .horizontalSubsampling = 1,
+ .verticalSubsampling = 1,
+ },
+ {
+ .components = {{.type = android::gralloc4::
+ PlaneLayoutComponentType_CR,
+ .offsetInBits = 0,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 8,
+ .horizontalSubsampling = 2,
+ .verticalSubsampling = 2,
+ },
+ {
+ .components = {{.type = android::gralloc4::
+ PlaneLayoutComponentType_CB,
+ .offsetInBits = 0,
+ .sizeInBits = 8}},
+ .sampleIncrementInBits = 8,
+ .horizontalSubsampling = 2,
+ .verticalSubsampling = 2,
+ },
+ }},
+ });
+ return *kPlaneLayoutsMap;
+}
+
+int getPlaneLayouts(uint32_t drmFormat, std::vector<PlaneLayout>* outPlaneLayouts) {
+ const auto& planeLayoutsMap = GetPlaneLayoutsMap();
+ const auto it = planeLayoutsMap.find(drmFormat);
+ if (it == planeLayoutsMap.end()) {
+ drv_log("Unknown plane layout for format %d\n", drmFormat);
+ return -1;
+ }
+
+ *outPlaneLayouts = it->second;
+ return 0;
+}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Utils.h b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Utils.h
new file mode 100644
index 00000000000..370922c60dd
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/CrosGralloc4Utils.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <string>
+#include <vector>
+
+#include <aidl/android/hardware/graphics/common/PlaneLayout.h>
+#include <android/hardware/graphics/common/1.2/types.h>
+#include <android/hardware/graphics/mapper/4.0/IMapper.h>
+
+#include "cros_gralloc/cros_gralloc_types.h"
+
+std::string getPixelFormatString(android::hardware::graphics::common::V1_2::PixelFormat format);
+
+std::string getUsageString(
+ android::hardware::hidl_bitfield<android::hardware::graphics::common::V1_2::BufferUsage>
+ usage);
+
+int convertToDrmFormat(android::hardware::graphics::common::V1_2::PixelFormat format,
+ uint32_t* outDrmFormat);
+
+int convertToBufferUsage(uint64_t grallocUsage, uint64_t* outBufferUsage);
+
+int convertToCrosDescriptor(
+ const android::hardware::graphics::mapper::V4_0::IMapper::BufferDescriptorInfo& descriptor,
+ struct cros_gralloc_buffer_descriptor* outCrosDescriptor);
+
+int convertToMapUsage(uint64_t grallocUsage, uint32_t* outMapUsage);
+
+int convertToFenceFd(const android::hardware::hidl_handle& fence_handle, int* out_fence_fd);
+
+int convertToFenceHandle(int fence_fd, android::hardware::hidl_handle* out_fence_handle);
+
+int getPlaneLayouts(
+ uint32_t drm_format,
+ std::vector<aidl::android::hardware::graphics::common::PlaneLayout>* out_layouts);
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm.rc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm.rc
new file mode 100644
index 00000000000..a96a6e1b48b
--- /dev/null
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm.rc
@@ -0,0 +1,24 @@
+#
+# Copyright 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+service vendor.graphics.allocator-4-0 /vendor/bin/hw/android.hardware.graphics.allocator@4.0-service.minigbm
+ interface android.hardware.graphics.allocator@4.0::IAllocator default
+ class hal animation
+ user system
+ group graphics drmrpc
+ capabilities SYS_NICE
+ onrestart restart surfaceflinger
+ writepid /dev/cpuset/system-background/tasks
diff --git a/chromium/third_party/minigbm/src/dri.c b/chromium/third_party/minigbm/src/dri.c
index 97dc567e6ba..dfcfb60b209 100644
--- a/chromium/third_party/minigbm/src/dri.c
+++ b/chromium/third_party/minigbm/src/dri.c
@@ -34,7 +34,9 @@ static const struct {
{ DRM_FORMAT_XBGR8888, __DRI_IMAGE_FORMAT_XBGR8888 },
{ DRM_FORMAT_ABGR8888, __DRI_IMAGE_FORMAT_ABGR8888 },
{ DRM_FORMAT_XRGB2101010, __DRI_IMAGE_FORMAT_XRGB2101010 },
+ { DRM_FORMAT_XBGR2101010, __DRI_IMAGE_FORMAT_XBGR2101010 },
{ DRM_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ARGB2101010 },
+ { DRM_FORMAT_ABGR2101010, __DRI_IMAGE_FORMAT_ABGR2101010 },
};
static int drm_format_to_dri_format(uint32_t drm_format)
@@ -69,10 +71,9 @@ static bool lookup_extension(const __DRIextension *const *extensions, const char
*/
static void close_gem_handle(uint32_t handle, int fd)
{
- struct drm_gem_close gem_close;
+ struct drm_gem_close gem_close = { 0 };
int ret = 0;
- memset(&gem_close, 0, sizeof(gem_close));
gem_close.handle = handle;
ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
if (ret)
diff --git a/chromium/third_party/minigbm/src/drv.c b/chromium/third_party/minigbm/src/drv.c
index 920cf4db4e7..5c8f9a0d5d9 100644
--- a/chromium/third_party/minigbm/src/drv.c
+++ b/chromium/third_party/minigbm/src/drv.c
@@ -28,44 +28,39 @@
#ifdef DRV_AMDGPU
extern const struct backend backend_amdgpu;
#endif
-extern const struct backend backend_evdi;
#ifdef DRV_EXYNOS
extern const struct backend backend_exynos;
#endif
#ifdef DRV_I915
extern const struct backend backend_i915;
#endif
-#ifdef DRV_MARVELL
-extern const struct backend backend_marvell;
-#endif
#ifdef DRV_MEDIATEK
extern const struct backend backend_mediatek;
#endif
-#ifdef DRV_MESON
-extern const struct backend backend_meson;
-#endif
#ifdef DRV_MSM
extern const struct backend backend_msm;
#endif
-extern const struct backend backend_nouveau;
-#ifdef DRV_RADEON
-extern const struct backend backend_radeon;
-#endif
#ifdef DRV_ROCKCHIP
extern const struct backend backend_rockchip;
#endif
-#ifdef DRV_SYNAPTICS
-extern const struct backend backend_synaptics;
-#endif
#ifdef DRV_TEGRA
extern const struct backend backend_tegra;
#endif
-extern const struct backend backend_udl;
#ifdef DRV_VC4
extern const struct backend backend_vc4;
#endif
-extern const struct backend backend_vgem;
+
+// Dumb / generic drivers
+extern const struct backend backend_evdi;
+extern const struct backend backend_marvell;
+extern const struct backend backend_meson;
+extern const struct backend backend_nouveau;
+extern const struct backend backend_komeda;
+extern const struct backend backend_radeon;
+extern const struct backend backend_synaptics;
extern const struct backend backend_virtio_gpu;
+extern const struct backend backend_udl;
+extern const struct backend backend_vkms;
static const struct backend *drv_get_backend(int fd)
{
@@ -81,43 +76,27 @@ static const struct backend *drv_get_backend(int fd)
#ifdef DRV_AMDGPU
&backend_amdgpu,
#endif
- &backend_evdi,
#ifdef DRV_EXYNOS
&backend_exynos,
#endif
#ifdef DRV_I915
&backend_i915,
#endif
-#ifdef DRV_MARVELL
- &backend_marvell,
-#endif
#ifdef DRV_MEDIATEK
&backend_mediatek,
#endif
-#ifdef DRV_MESON
- &backend_meson,
-#endif
#ifdef DRV_MSM
&backend_msm,
#endif
- &backend_nouveau,
-#ifdef DRV_RADEON
- &backend_radeon,
-#endif
#ifdef DRV_ROCKCHIP
&backend_rockchip,
#endif
-#ifdef DRV_SYNAPTICS
- &backend_synaptics,
-#endif
-#ifdef DRV_TEGRA
- &backend_tegra,
-#endif
- &backend_udl,
#ifdef DRV_VC4
&backend_vc4,
#endif
- &backend_vgem, &backend_virtio_gpu,
+ &backend_evdi, &backend_marvell, &backend_meson, &backend_nouveau,
+ &backend_komeda, &backend_radeon, &backend_synaptics, &backend_virtio_gpu,
+ &backend_udl, &backend_vkms
};
for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
@@ -150,6 +129,10 @@ struct driver *drv_create(int fd)
if (!drv)
return NULL;
+ char *minigbm_debug;
+ minigbm_debug = getenv("MINIGBM_DEBUG");
+ drv->compression = (minigbm_debug == NULL) || (strcmp(minigbm_debug, "nocompression") != 0);
+
drv->fd = fd;
drv->backend = drv_get_backend(fd);
@@ -445,7 +428,7 @@ void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags
{
uint32_t i;
uint8_t *addr;
- struct mapping mapping;
+ struct mapping mapping = { 0 };
assert(rect->width >= 0);
assert(rect->height >= 0);
@@ -459,7 +442,6 @@ void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags
return MAP_FAILED;
}
- memset(&mapping, 0, sizeof(mapping));
mapping.rect = *rect;
mapping.refcount = 1;
@@ -558,6 +540,21 @@ int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
return ret;
}
+int drv_bo_flush(struct bo *bo, struct mapping *mapping)
+{
+ int ret = 0;
+
+ assert(mapping);
+ assert(mapping->vma);
+ assert(mapping->refcount > 0);
+ assert(mapping->vma->refcount > 0);
+
+ if (bo->drv->backend->bo_flush)
+ ret = bo->drv->backend->bo_flush(bo, mapping);
+
+ return ret;
+}
+
int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
{
int ret = 0;
@@ -648,6 +645,11 @@ uint32_t drv_bo_get_format(struct bo *bo)
return bo->meta.format;
}
+size_t drv_bo_get_total_size(struct bo *bo)
+{
+ return bo->meta.total_size;
+}
+
uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
if (drv->backend->resolve_format)
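
Two behavioural additions land in drv.c above: drv_create() now reads MINIGBM_DEBUG from the environment to gate buffer compression, and drv_bo_flush() gives callers an explicit flush that, unlike drv_bo_flush_or_unmap(), leaves the mapping intact. A sketch of the environment check in isolation (mirroring the hunk, not a new API):

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	static bool compression_enabled(void)
	{
		const char *dbg = getenv("MINIGBM_DEBUG");

		/* Compression stays on unless the variable is exactly
		 * "nocompression", e.g. MINIGBM_DEBUG=nocompression. */
		return dbg == NULL || strcmp(dbg, "nocompression") != 0;
	}
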
diff --git a/chromium/third_party/minigbm/src/drv.h b/chromium/third_party/minigbm/src/drv.h
index 2b86aad80ff..4a47b7628cd 100644
--- a/chromium/third_party/minigbm/src/drv.h
+++ b/chromium/third_party/minigbm/src/drv.h
@@ -89,6 +89,7 @@ struct drv_import_fd_data {
uint32_t width;
uint32_t height;
uint32_t format;
+ uint32_t tiling;
uint64_t use_flags;
};
@@ -145,6 +146,8 @@ int drv_bo_unmap(struct bo *bo, struct mapping *mapping);
int drv_bo_invalidate(struct bo *bo, struct mapping *mapping);
+int drv_bo_flush(struct bo *bo, struct mapping *mapping);
+
int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping);
uint32_t drv_bo_get_width(struct bo *bo);
diff --git a/chromium/third_party/minigbm/src/drv_priv.h b/chromium/third_party/minigbm/src/drv_priv.h
index 32c082d6692..6ce7fa10b0d 100644
--- a/chromium/third_party/minigbm/src/drv_priv.h
+++ b/chromium/third_party/minigbm/src/drv_priv.h
@@ -57,6 +57,7 @@ struct driver {
struct drv_array *mappings;
struct drv_array *combos;
pthread_mutex_t driver_lock;
+ bool compression;
};
struct backend {
@@ -85,19 +86,19 @@ struct backend {
};
// clang-format off
-#define BO_USE_RENDER_MASK (BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERING | \
- BO_USE_RENDERSCRIPT | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
- BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE)
+#define BO_USE_RENDER_MASK (BO_USE_LINEAR | BO_USE_RENDERING | BO_USE_RENDERSCRIPT | \
+ BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_RARELY | \
+ BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE)
-#define BO_USE_TEXTURE_MASK (BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERSCRIPT | \
- BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
- BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE)
+#define BO_USE_TEXTURE_MASK (BO_USE_LINEAR | BO_USE_RENDERSCRIPT | BO_USE_SW_READ_OFTEN | \
+ BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_RARELY | \
+ BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE)
#define BO_USE_SW_MASK (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
- BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY)
+ BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY)
#define BO_USE_NON_GPU_HW (BO_USE_SCANOUT | BO_USE_CAMERA_WRITE | BO_USE_CAMERA_READ | \
- BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)
+ BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)
#ifndef DRM_FORMAT_MOD_LINEAR
#define DRM_FORMAT_MOD_LINEAR DRM_FORMAT_MOD_NONE
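
Note that BO_USE_PROTECTED has been dropped from both BO_USE_RENDER_MASK and BO_USE_TEXTURE_MASK above, so protected buffers no longer match the generic render/texture combinations. A hypothetical helper (the name is illustrative, not from the patch) showing the usual way such a mask is consulted:

	#include <stdbool.h>
	#include <stdint.h>

	/* A linear, CPU-mappable layout is needed whenever any of the
	 * software read/write flags in BO_USE_SW_MASK is requested. */
	static inline bool needs_cpu_access(uint64_t use_flags)
	{
		return (use_flags & BO_USE_SW_MASK) != 0;
	}
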
diff --git a/chromium/third_party/minigbm/src/dumb_driver.c b/chromium/third_party/minigbm/src/dumb_driver.c
new file mode 100644
index 00000000000..f5a62aa4e97
--- /dev/null
+++ b/chromium/third_party/minigbm/src/dumb_driver.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+#define INIT_DUMB_DRIVER(driver) \
+ const struct backend backend_##driver = { \
+ .name = #driver, \
+ .init = dumb_driver_init, \
+ .bo_create = drv_dumb_bo_create, \
+ .bo_destroy = drv_dumb_bo_destroy, \
+ .bo_import = drv_prime_bo_import, \
+ .bo_map = drv_dumb_bo_map, \
+ .bo_unmap = drv_bo_munmap, \
+ };
+
+static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_BGR888, DRM_FORMAT_BGR565 };
+
+static const uint32_t texture_only_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21,
+ DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+
+static int dumb_driver_init(struct driver *drv)
+{
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+
+ drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats),
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
+ BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
+
+ return drv_modify_linear_combinations(drv);
+}
+
+INIT_DUMB_DRIVER(evdi)
+INIT_DUMB_DRIVER(komeda)
+INIT_DUMB_DRIVER(marvell)
+INIT_DUMB_DRIVER(meson)
+INIT_DUMB_DRIVER(nouveau)
+INIT_DUMB_DRIVER(radeon)
+INIT_DUMB_DRIVER(synaptics)
+INIT_DUMB_DRIVER(udl)
+INIT_DUMB_DRIVER(vkms)
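
This new file replaces the per-device stub drivers (evdi.c, deleted below, was one of them) with a single table-driven implementation: each INIT_DUMB_DRIVER(name) invocation stamps out a backend bound to the generic dumb-buffer helpers. Expanding INIT_DUMB_DRIVER(evdi) by hand yields:

	const struct backend backend_evdi = {
		.name = "evdi",
		.init = dumb_driver_init,
		.bo_create = drv_dumb_bo_create,
		.bo_destroy = drv_dumb_bo_destroy,
		.bo_import = drv_prime_bo_import,
		.bo_map = drv_dumb_bo_map,
		.bo_unmap = drv_bo_munmap,
	};

which is functionally the old evdi backend with the shared dumb_driver_init() in place of its private init hook.
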
diff --git a/chromium/third_party/minigbm/src/evdi.c b/chromium/third_party/minigbm/src/evdi.c
deleted file mode 100644
index bfa62a045c1..00000000000
--- a/chromium/third_party/minigbm/src/evdi.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2016 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
-
-static int evdi_init(struct driver *drv)
-{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
-
- return drv_modify_linear_combinations(drv);
-}
-
-const struct backend backend_evdi = {
- .name = "evdi",
- .init = evdi_init,
- .bo_create = drv_dumb_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
- .bo_unmap = drv_bo_munmap,
-};
diff --git a/chromium/third_party/minigbm/src/external/i915_drm.h b/chromium/third_party/minigbm/src/external/i915_drm.h
new file mode 100644
index 00000000000..f5991a8d40f
--- /dev/null
+++ b/chromium/third_party/minigbm/src/external/i915_drm.h
@@ -0,0 +1,2422 @@
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _UAPI_I915_DRM_H_
+#define _UAPI_I915_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+/**
+ * DOC: uevents generated by i915 on its device node
+ *
+ * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
+ * event from the gpu l3 cache. Additional information supplied is ROW,
+ * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ * track of these events and if a specific cache-line seems to have a
+ * persistent error remap it with the l3 remapping tool supplied in
+ * intel-gpu-tools. The value supplied with the event is always 1.
+ *
+ * I915_ERROR_UEVENT - Generated upon error detection, currently only via
+ * hangcheck. The error detection event is a good indicator of when things
+ * began to go badly. The value supplied with the event is a 1 upon error
+ * detection, and a 0 upon reset completion, signifying no more error
+ * exists. NOTE: Disabling hangcheck or reset via module parameter will
+ * cause the related events to not be seen.
+ *
+ * I915_RESET_UEVENT - Event is generated just before an attempt to reset
+ * the GPU. The value supplied with the event is always 1. NOTE: Disabling
+ * reset via module parameter will cause this event to not be seen.
+ */
+#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
+#define I915_ERROR_UEVENT "ERROR"
+#define I915_RESET_UEVENT "RESET"
+
+/*
+ * i915_user_extension: Base class for defining a chain of extensions
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ */
+struct i915_user_extension {
+ __u64 next_extension;
+ __u32 name;
+ __u32 flags; /* All undefined bits must be zero. */
+ __u32 rsvd[4]; /* Reserved for future use; must be zero. */
+};
+
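/* Editor's sketch, not part of the header: chaining two hypothetical
 * extensions. Each extension embeds struct i915_user_extension as its
 * first member and links to the next through the u64-encoded
 * next_extension pointer; the ioctl argument carries only the head.
 * The extension names (1, 2) and struct layouts here are made up. */
#if 0
struct ext_foo { struct i915_user_extension base; __u32 value; __u32 pad; };
struct ext_bar { struct i915_user_extension base; __u64 data; };

struct ext_bar bar = { .base = { .name = 2 } };
struct ext_foo foo = {
	.base = { .next_extension = (uintptr_t)&bar, .name = 1 },
	.value = 42,
};
/* pass (uintptr_t)&foo as the head of the chain */
#endif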
+/*
+ * MOCS indexes used for GPU surfaces, defining the cacheability of the
+ * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
+ */
+enum i915_mocs_table_index {
+ /*
+ * Not cached anywhere, coherency between CPU and GPU accesses is
+ * guaranteed.
+ */
+ I915_MOCS_UNCACHED,
+ /*
+ * Cacheability and coherency controlled by the kernel automatically
+ * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
+ * usage of the surface (used for display scanout or not).
+ */
+ I915_MOCS_PTE,
+ /*
+ * Cached in all GPU caches available on the platform.
+ * Coherency between CPU and GPU accesses to the surface is not
+ * guaranteed without extra synchronization.
+ */
+ I915_MOCS_CACHED,
+};
+
+/*
+ * Different engines serve different roles, and there may be more than one
+ * engine serving each role. enum drm_i915_gem_engine_class provides a
+ * classification of the role of the engine, which may be used when requesting
+ * operations to be performed on a certain subset of engines, or for providing
+ * information about that group.
+ */
+enum drm_i915_gem_engine_class {
+ I915_ENGINE_CLASS_RENDER = 0,
+ I915_ENGINE_CLASS_COPY = 1,
+ I915_ENGINE_CLASS_VIDEO = 2,
+ I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
+
+ /* should be kept compact */
+
+ I915_ENGINE_CLASS_INVALID = -1
+};
+
+/*
+ * There may be more than one engine fulfilling any role within the system.
+ * Each engine of a class is given a unique instance number and therefore
+ * any engine can be specified by its class:instance tuple. APIs that allow
+ * access to any engine in the system will use struct i915_engine_class_instance
+ * for this identification.
+ */
+struct i915_engine_class_instance {
+ __u16 engine_class; /* see enum drm_i915_gem_engine_class */
+ __u16 engine_instance;
+#define I915_ENGINE_CLASS_INVALID_NONE -1
+#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
+};
+
+/**
+ * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
+ *
+ */
+
+enum drm_i915_pmu_engine_sample {
+ I915_SAMPLE_BUSY = 0,
+ I915_SAMPLE_WAIT = 1,
+ I915_SAMPLE_SEMA = 2
+};
+
+#define I915_PMU_SAMPLE_BITS (4)
+#define I915_PMU_SAMPLE_MASK (0xf)
+#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
+#define I915_PMU_CLASS_SHIFT \
+ (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
+
+#define __I915_PMU_ENGINE(class, instance, sample) \
+ ((class) << I915_PMU_CLASS_SHIFT | \
+ (instance) << I915_PMU_SAMPLE_BITS | \
+ (sample))
+
+#define I915_PMU_ENGINE_BUSY(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
+
+#define I915_PMU_ENGINE_WAIT(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
+
+#define I915_PMU_ENGINE_SEMA(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
+
+#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+
+#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
+#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
+#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
+#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
+
+#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+
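/* Editor's sketch, not part of the header: the perf config layout is
 * class:instance:sample with I915_PMU_CLASS_SHIFT = 4 + 8 = 12, so: */
#if 0
__u64 render_busy = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);
	/* (0 << 12) | (0 << 4) | 0 == 0 */
__u64 vcs1_sema = I915_PMU_ENGINE_SEMA(I915_ENGINE_CLASS_VIDEO, 1);
	/* (2 << 12) | (1 << 4) | 2 == 8210 */
#endif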
+/* Each region is a minimum of 16k, and there are at most 255 of them.
+ */
+#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
+ * of chars for next/prev indices */
+#define I915_LOG_MIN_TEX_REGION_SIZE 14
+
+typedef struct _drm_i915_init {
+ enum {
+ I915_INIT_DMA = 0x01,
+ I915_CLEANUP_DMA = 0x02,
+ I915_RESUME_DMA = 0x03
+ } func;
+ unsigned int mmio_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+ unsigned int chipset;
+} drm_i915_init_t;
+
+typedef struct _drm_i915_sarea {
+ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
+ int last_upload; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int ctxOwner; /* last context to upload state */
+ int texAge;
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+ int perf_boxes; /* performance boxes to be displayed */
+ int width, height; /* screen size in pixels */
+
+ drm_handle_t front_handle;
+ int front_offset;
+ int front_size;
+
+ drm_handle_t back_handle;
+ int back_offset;
+ int back_size;
+
+ drm_handle_t depth_handle;
+ int depth_offset;
+ int depth_size;
+
+ drm_handle_t tex_handle;
+ int tex_offset;
+ int tex_size;
+ int log_tex_granularity;
+ int pitch;
+ int rotation; /* 0, 90, 180 or 270 */
+ int rotated_offset;
+ int rotated_size;
+ int rotated_pitch;
+ int virtualX, virtualY;
+
+ unsigned int front_tiled;
+ unsigned int back_tiled;
+ unsigned int depth_tiled;
+ unsigned int rotated_tiled;
+ unsigned int rotated2_tiled;
+
+ int pipeA_x;
+ int pipeA_y;
+ int pipeA_w;
+ int pipeA_h;
+ int pipeB_x;
+ int pipeB_y;
+ int pipeB_w;
+ int pipeB_h;
+
+ /* fill out some space for old userspace triple buffer */
+ drm_handle_t unused_handle;
+ __u32 unused1, unused2, unused3;
+
+ /* buffer object handles for static buffers. May change
+ * over the lifetime of the client.
+ */
+ __u32 front_bo_handle;
+ __u32 back_bo_handle;
+ __u32 unused_bo_handle;
+ __u32 depth_bo_handle;
+
+} drm_i915_sarea_t;
+
+/* due to userspace building against these headers we need some compat here */
+#define planeA_x pipeA_x
+#define planeA_y pipeA_y
+#define planeA_w pipeA_w
+#define planeA_h pipeA_h
+#define planeB_x pipeB_x
+#define planeB_y pipeB_y
+#define planeB_w pipeB_w
+#define planeB_h pipeB_h
+
+/* Flags for perf_boxes
+ */
+#define I915_BOX_RING_EMPTY 0x1
+#define I915_BOX_FLIP 0x2
+#define I915_BOX_WAIT 0x4
+#define I915_BOX_TEXTURE_LOAD 0x8
+#define I915_BOX_LOST_CONTEXT 0x10
+
+/*
+ * i915 specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
+ * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
+ * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
+ */
+#define DRM_I915_INIT 0x00
+#define DRM_I915_FLUSH 0x01
+#define DRM_I915_FLIP 0x02
+#define DRM_I915_BATCHBUFFER 0x03
+#define DRM_I915_IRQ_EMIT 0x04
+#define DRM_I915_IRQ_WAIT 0x05
+#define DRM_I915_GETPARAM 0x06
+#define DRM_I915_SETPARAM 0x07
+#define DRM_I915_ALLOC 0x08
+#define DRM_I915_FREE 0x09
+#define DRM_I915_INIT_HEAP 0x0a
+#define DRM_I915_CMDBUFFER 0x0b
+#define DRM_I915_DESTROY_HEAP 0x0c
+#define DRM_I915_SET_VBLANK_PIPE 0x0d
+#define DRM_I915_GET_VBLANK_PIPE 0x0e
+#define DRM_I915_VBLANK_SWAP 0x0f
+#define DRM_I915_HWS_ADDR 0x11
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_ENTERVT 0x19
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
+#define DRM_I915_GEM_SW_FINISH 0x20
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT 0x24
+#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
+#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_GEM_EXECBUFFER2 0x29
+#define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2
+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GEM_WAIT 0x2c
+#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
+#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
+#define DRM_I915_GEM_SET_CACHING 0x2f
+#define DRM_I915_GEM_GET_CACHING 0x30
+#define DRM_I915_REG_READ 0x31
+#define DRM_I915_GET_RESET_STATS 0x32
+#define DRM_I915_GEM_USERPTR 0x33
+#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
+#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
+#define DRM_I915_PERF_OPEN 0x36
+#define DRM_I915_PERF_ADD_CONFIG 0x37
+#define DRM_I915_PERF_REMOVE_CONFIG 0x38
+#define DRM_I915_QUERY 0x39
+#define DRM_I915_GEM_VM_CREATE 0x3a
+#define DRM_I915_GEM_VM_DESTROY 0x3b
+#define DRM_I915_PXP_OPS 0x3c
+/* Must be kept compact -- no holes */
+
+#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
+#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
+#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
+#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
+#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
+#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
+#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
+#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
+#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
+#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
+#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create_ext)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
+#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
+#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
+#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
+#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
+#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
+#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
+#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_PXP_OPS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_PXP_OPS, struct drm_i915_pxp_ops)
+
+/* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+ */
+typedef struct drm_i915_batchbuffer {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer_t;
+
+/* As above, but pass a pointer to userspace buffer which can be
+ * validated by the kernel prior to sending to hardware.
+ */
+typedef struct _drm_i915_cmdbuffer {
+ char *buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer_t;
+
+/* Userspace can request & wait on irq's:
+ */
+typedef struct drm_i915_irq_emit {
+ int *irq_seq;
+} drm_i915_irq_emit_t;
+
+typedef struct drm_i915_irq_wait {
+ int irq_seq;
+} drm_i915_irq_wait_t;
+
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE 0
+#define I915_GEM_PPGTT_ALIASING 1
+#define I915_GEM_PPGTT_FULL 2
+
+/* Ioctl to query kernel params:
+ */
+#define I915_PARAM_IRQ_ACTIVE 1
+#define I915_PARAM_ALLOW_BATCHBUFFER 2
+#define I915_PARAM_LAST_DISPATCH 3
+#define I915_PARAM_CHIPSET_ID 4
+#define I915_PARAM_HAS_GEM 5
+#define I915_PARAM_NUM_FENCES_AVAIL 6
+#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_HAS_PAGEFLIPPING 8
+#define I915_PARAM_HAS_EXECBUF2 9
+#define I915_PARAM_HAS_BSD 10
+#define I915_PARAM_HAS_BLT 11
+#define I915_PARAM_HAS_RELAXED_FENCING 12
+#define I915_PARAM_HAS_COHERENT_RINGS 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS 14
+#define I915_PARAM_HAS_RELAXED_DELTA 15
+#define I915_PARAM_HAS_GEN7_SOL_RESET 16
+#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_ALIASING_PPGTT 18
+#define I915_PARAM_HAS_WAIT_TIMEOUT 19
+#define I915_PARAM_HAS_SEMAPHORES 20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
+#define I915_PARAM_HAS_VEBOX 22
+#define I915_PARAM_HAS_SECURE_BATCHES 23
+#define I915_PARAM_HAS_PINNED_BATCHES 24
+#define I915_PARAM_HAS_EXEC_NO_RELOC 25
+#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
+#define I915_PARAM_HAS_WT 27
+#define I915_PARAM_CMD_PARSER_VERSION 28
+#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
+#define I915_PARAM_MMAP_VERSION 30
+#define I915_PARAM_HAS_BSD2 31
+#define I915_PARAM_REVISION 32
+#define I915_PARAM_SUBSLICE_TOTAL 33
+#define I915_PARAM_EU_TOTAL 34
+#define I915_PARAM_HAS_GPU_RESET 35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
+#define I915_PARAM_HAS_EXEC_SOFTPIN 37
+#define I915_PARAM_HAS_POOLED_EU 38
+#define I915_PARAM_MIN_EU_IN_POOL 39
+#define I915_PARAM_MMAP_GTT_VERSION 40
+
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+ * priorities and the driver will attempt to execute batches in priority order.
+ * The param returns a capability bitmask, nonzero implies that the scheduler
+ * is enabled, with different features present according to the mask.
+ *
+ * The initial priority for each batch is supplied by the context and is
+ * controlled via I915_CONTEXT_PARAM_PRIORITY.
+ */
+#define I915_PARAM_HAS_SCHEDULER 41
+#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
+#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
+#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
+#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
+#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
+
+#define I915_PARAM_HUC_STATUS 42
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
+ * synchronisation with implicit fencing on individual objects.
+ * See EXEC_OBJECT_ASYNC.
+ */
+#define I915_PARAM_HAS_EXEC_ASYNC 43
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
+ * both being able to pass in a sync_file fd to wait upon before executing,
+ * and being able to return a new sync_file fd that is signaled when the
+ * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
+ */
+#define I915_PARAM_HAS_EXEC_FENCE 44
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
+ * EXEC_OBJECT_CAPTURE.
+ */
+#define I915_PARAM_HAS_EXEC_CAPTURE 45
+
+#define I915_PARAM_SLICE_MASK 46
+
+/* Assuming it's uniform for each slice, this queries the mask of subslices
+ * per-slice for this system.
+ */
+#define I915_PARAM_SUBSLICE_MASK 47
+
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
+ * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
+ */
+#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
+ */
+#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
+
+/*
+ * Query whether every context (both per-file default and user created) is
+ * isolated (insofar as HW supports). If this parameter is not true, then
+ * freshly created contexts may inherit values from an existing context,
+ * rather than default HW values. If true, it also ensures (insofar as HW
+ * supports) that all state set by this context will not leak to any other
+ * context.
+ *
+ * As not every engine across every gen supports contexts, the returned
+ * value reports the support of context isolation for individual engines by
+ * returning a bitmask of each engine class set to true if that class supports
+ * isolation.
+ */
+#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
+
+/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
+ * registers. This used to be fixed per platform but from CNL onwards, this
+ * might vary depending on the parts.
+ */
+#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
+
+/*
+ * Once upon a time we supposed that writes through the GGTT would be
+ * immediately in physical memory (once flushed out of the CPU path). However,
+ * on a few different processors and chipsets, this is not necessarily the case
+ * as the writes appear to be buffered internally. Thus a read of the backing
+ * storage (physical memory) via a different path (with different physical tags
+ * to the indirect write via the GGTT) will see stale values from before
+ * the GGTT write. Inside the kernel, we can for the most part keep track of
+ * the different read/write domains in use (e.g. set-domain), but the assumption
+ * of coherency is baked into the ABI, hence reporting its true state in this
+ * parameter.
+ *
+ * Reports true when writes via mmap_gtt are immediately visible following an
+ * lfence to flush the WCB.
+ *
+ * Reports false when writes via mmap_gtt are indeterminately delayed in an
+ * internal buffer and are _not_ immediately visible to third parties accessing
+ * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
+ * communications channel when reporting false is strongly discouraged.
+ */
+#define I915_PARAM_MMAP_GTT_COHERENT 52
+
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
+ * execution through use of explicit fence support.
+ * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
+ */
+#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
+
+/*
+ * Revision of the i915-perf uAPI. The value returned helps determine what
+ * i915-perf features are available. See drm_i915_perf_property_id.
+ */
+#define I915_PARAM_PERF_REVISION 54
+
+/* Must be kept compact -- no holes and well documented */
+
+typedef struct drm_i915_getparam {
+ __s32 param;
+ /*
+ * WARNING: Using pointers instead of fixed-size u64 means we need to write
+ * compat32 code. Don't repeat this mistake.
+ */
+ int *value;
+} drm_i915_getparam_t;
+
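/* Editor's sketch, not part of the header: querying a parameter via
 * libdrm's drmIoctl() on an open i915 fd. */
#if 0
int chipset_id = 0;
drm_i915_getparam_t gp = {
	.param = I915_PARAM_CHIPSET_ID,
	.value = &chipset_id,
};
if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
	printf("PCI device id: 0x%x\n", chipset_id);
#endif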
+/* Ioctl to set kernel params:
+ */
+#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
+#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
+#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+#define I915_SETPARAM_NUM_USED_FENCES 4
+/* Must be kept compact -- no holes */
+
+typedef struct drm_i915_setparam {
+ int param;
+ int value;
+} drm_i915_setparam_t;
+
+/* A memory manager for regions of shared memory:
+ */
+#define I915_MEM_REGION_AGP 1
+
+typedef struct drm_i915_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int *region_offset; /* offset from start of fb or agp */
+} drm_i915_mem_alloc_t;
+
+typedef struct drm_i915_mem_free {
+ int region;
+ int region_offset;
+} drm_i915_mem_free_t;
+
+typedef struct drm_i915_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_i915_mem_init_heap_t;
+
+/* Allow memory manager to be torn down and re-initialized (eg on
+ * rotate):
+ */
+typedef struct drm_i915_mem_destroy_heap {
+ int region;
+} drm_i915_mem_destroy_heap_t;
+
+/* Allow X server to configure which pipes to monitor for vblank signals
+ */
+#define DRM_I915_VBLANK_PIPE_A 1
+#define DRM_I915_VBLANK_PIPE_B 2
+
+typedef struct drm_i915_vblank_pipe {
+ int pipe;
+} drm_i915_vblank_pipe_t;
+
+/* Schedule buffer swap at given vertical blank:
+ */
+typedef struct drm_i915_vblank_swap {
+ drm_drawable_t drawable;
+ enum drm_vblank_seq_type seqtype;
+ unsigned int sequence;
+} drm_i915_vblank_swap_t;
+
+typedef struct drm_i915_hws_addr {
+ __u64 addr;
+} drm_i915_hws_addr_t;
+
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ __u64 gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ __u64 gtt_end;
+};
+
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ __u64 size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_i915_gem_create_ext {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ __u64 size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+ __u32 pad;
+#define I915_GEM_CREATE_EXT_SETPARAM (1u << 0)
+#define I915_GEM_CREATE_EXT_FLAGS_UNKNOWN \
+ (-(I915_GEM_CREATE_EXT_SETPARAM << 1))
+ __u64 extensions;
+
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to read from */
+ __u64 offset;
+ /** Length of data to read */
+ __u64 size;
+ /**
+ * Pointer to write the data into.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to write to */
+ __u64 offset;
+ /** Length of data to write */
+ __u64 size;
+ /**
+ * Pointer to read the data from.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset in the object to map. */
+ __u64 offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ __u64 size;
+ /**
+ * Returned pointer the data was mapped at.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 addr_ptr;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * Added in version 2.
+ */
+ __u64 flags;
+#define I915_MMAP_WC 0x1
+};
+
+struct drm_i915_gem_mmap_gtt {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_i915_gem_mmap_offset {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * One of the MMAP_OFFSET types (GTT, WC, WB, UC, etc)
+ * must be included.
+ */
+ __u64 flags;
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+
+ /*
+ * Zero-terminated chain of extensions.
+ *
+ * No current extensions defined; mbz.
+ */
+ __u64 extensions;
+};
+
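/* Editor's sketch, not part of the header: the two-step mapping flow.
 * First request a fake offset for the desired caching mode, then pass
 * it to a regular mmap() on the same fd (assumes <sys/mman.h> and
 * libdrm's drmIoctl()). */
#if 0
struct drm_i915_gem_mmap_offset arg = {
	.handle = handle,
	.flags = I915_MMAP_OFFSET_WC,	/* write-combined CPU mapping */
};
if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		   arg.offset);
#endif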
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ __u32 handle;
+
+ /** New read domains */
+ __u32 read_domains;
+
+ /** New write domain */
+ __u32 write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+ /** Handle for the object */
+ __u32 handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+ * It's appealing to make this an index into the mm_validate_entry
+ * list to refer to the buffer, but this allows the driver to create
+ * a relocation list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ __u32 target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ __u32 delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ __u64 offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ __u64 presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ __u32 read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ __u32 write_domain;
+};
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT 0x00000040
+/** WC domain - uncached access */
+#define I915_GEM_DOMAIN_WC 0x00000080
+/** @} */
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ __u64 offset;
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+ * performed on them.
+ *
+ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ __u64 buffers_ptr;
+ __u32 buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ __u32 batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
+};
+
+struct drm_i915_gem_exec_object2 {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * When the EXEC_OBJECT_PINNED flag is specified this is populated by
+ * the user with the GTT offset at which this object will be pinned.
+ * When the I915_EXEC_NO_RELOC flag is specified this must contain the
+ * presumed_offset of the object.
+ * During execbuffer2 the kernel populates it with the value of the
+ * current GTT offset of the object, for future presumed_offset writes.
+ */
+ __u64 offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT (1<<1)
+#define EXEC_OBJECT_WRITE (1<<2)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+#define EXEC_OBJECT_PINNED (1<<4)
+#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
+/* The kernel implicitly tracks GPU activity on all GEM objects, and
+ * synchronises operations with outstanding rendering. This includes
+ * rendering on other devices if exported via dma-buf. However, sometimes
+ * this tracking is too coarse and the user knows better. For example,
+ * if the object is split into non-overlapping ranges shared between different
+ * clients or engines (i.e. suballocating objects), the implicit tracking
+ * by the kernel assumes that each operation affects the whole object rather
+ * than an individual range, causing needless synchronisation between clients.
+ * The kernel will also forgo any CPU cache flushes prior to rendering from
+ * the object as the client is expected to be also handling such domain
+ * tracking.
+ *
+ * The kernel maintains the implicit tracking in order to manage resources
+ * used by the GPU - this flag only disables the synchronisation prior to
+ * rendering with this object in this execbuf.
+ *
+ * Opting out of implicit synchronisation requires the user to do its own
+ * explicit tracking to avoid rendering corruption. See, for example,
+ * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
+ */
+#define EXEC_OBJECT_ASYNC (1<<6)
+/* Request that the contents of this execobject be copied into the error
+ * state upon a GPU hang involving this batch for post-mortem debugging.
+ * These buffers are recorded in no particular order as "user" in
+ * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
+ * if the kernel supports this flag.
+ */
+#define EXEC_OBJECT_CAPTURE (1<<7)
+/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
+ __u64 flags;
+
+ union {
+ __u64 rsvd1;
+ __u64 pad_to_size;
+ };
+ __u64 rsvd2;
+};
+
+struct drm_i915_gem_exec_fence {
+ /**
+ * User's handle for a drm_syncobj to wait on or signal.
+ */
+ __u32 handle;
+
+#define I915_EXEC_FENCE_WAIT (1<<0)
+#define I915_EXEC_FENCE_SIGNAL (1<<1)
+#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
+ __u32 flags;
+};
+
+struct drm_i915_gem_execbuffer2 {
+ /**
+ * List of gem_exec_object2 structs
+ */
+ __u64 buffers_ptr;
+ __u32 buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ __u32 batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
+ /**
+ * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
+ * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
+ * struct drm_i915_gem_exec_fence *fences.
+ */
+ __u64 cliprects_ptr;
+#define I915_EXEC_RING_MASK (0x3f)
+#define I915_EXEC_DEFAULT (0<<0)
+#define I915_EXEC_RENDER (1<<0)
+#define I915_EXEC_BSD (2<<0)
+#define I915_EXEC_BLT (3<<0)
+#define I915_EXEC_VEBOX (4<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK (3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
+ __u64 flags;
+ __u64 rsvd1; /* now used for context info */
+ __u64 rsvd2;
+};
+
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE (1<<9)
+
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED (1<<10)
+
+/** Provide a hint to the kernel that the command stream and auxiliary
+ * state buffers already hold the correct presumed addresses and so the
+ * relocation process may be skipped if no buffers need to be moved in
+ * preparation for the execbuffer.
+ */
+#define I915_EXEC_NO_RELOC (1<<11)
+
+/** Use the reloc.handle as an index into the exec object array rather
+ * than as the per-file handle.
+ */
+#define I915_EXEC_HANDLE_LUT (1<<12)
+
+/** Used for switching BSD rings on the platforms with two BSD rings */
+#define I915_EXEC_BSD_SHIFT (13)
+#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
+/* default ping-pong mode */
+#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
+
+/** Tell the kernel that the batchbuffer is processed by
+ * the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER (1<<15)
+
+/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_IN (1<<16)
+
+/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
+ * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
+ * to the caller, and it should be closed after use. (The fd is a regular
+ * file descriptor and will be cleaned up on process termination. It holds
+ * a reference to the request, but nothing else.)
+ *
+ * The sync_file fd can be combined with other sync_file fds and passed either
+ * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
+ * will only occur after this request completes), or to other devices.
+ *
+ * Using I915_EXEC_FENCE_OUT requires use of
+ * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
+ * back to userspace. Failure to do so will cause the out-fence to always
+ * be reported as zero, and the real fence fd to be leaked.
+ */
+#define I915_EXEC_FENCE_OUT (1<<17)
+
+/*
+ * Traditionally the execbuf ioctl has only considered the final element in
+ * the execobject[] to be the executable batch. Often though, the client
+ * will know the batch object prior to construction and being able to place
+ * it into the execobject[] array first can simplify the relocation tracking.
+ * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
+ * execobject[] as the batch instead (the default is to use the last
+ * element).
+ */
+#define I915_EXEC_BATCH_FIRST (1<<18)
+
+/* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
+ * define an array of i915_gem_exec_fence structures which specify a set of
+ * dma fences to wait upon or signal.
+ */
+#define I915_EXEC_FENCE_ARRAY (1<<19)
+
+/*
+ * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_SUBMIT (1 << 20)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
+
+#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
+#define i915_execbuffer2_set_context_id(eb2, context) \
+ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
+#define i915_execbuffer2_get_context_id(eb2) \
+ ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
+
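/* Editor's sketch, not part of the header: rsvd1 carries the context
 * id, written and read through the two macros above. */
#if 0
struct drm_i915_gem_execbuffer2 eb2 = { 0 };
i915_execbuffer2_set_context_id(eb2, ctx_id);
assert(i915_execbuffer2_get_context_id(eb2) ==
       (ctx_id & I915_EXEC_CONTEXT_ID_MASK));
#endif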
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ __u32 handle;
+ __u32 pad;
+
+ /** alignment required within the aperture */
+ __u64 alignment;
+
+ /** Returned GTT offset of the buffer. */
+ __u64 offset;
+};
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ __u32 handle;
+
+ /** Return busy status
+ *
+ * A return of 0 implies that the object is idle (after
+ * having flushed any pending activity), and a non-zero return that
+ * the object is still in-flight on the GPU. (The GPU has not yet
+ * signaled completion for all pending requests that reference the
+ * object.) An object is guaranteed to become idle eventually (so
+ * long as no new GPU commands are executed upon it). Due to the
+ * asynchronous nature of the hardware, an object reported
+ * as busy may become idle before the ioctl is completed.
+ *
+ * Furthermore, if the object is busy, which engine is busy is only
+ * provided as a guide and only indirectly by reporting its class
+ * (there may be more than one engine in each class). There are race
+ * conditions which prevent the report of which engines are busy from
+ * being always accurate. However, the converse is not true. If the
+ * object is idle, the result of the ioctl, that all engines are idle,
+ * is accurate.
+ *
+ * The returned dword is split into two fields to indicate both
+ * the engine classes on which the object is being read, and the
+ * engine class on which it is currently being written (if any).
+ *
+ * The low word (bits 0:15) indicate if the object is being written
+ * to by any engine (there can only be one, as the GEM implicit
+ * synchronisation rules force writes to be serialised). Only the
+ * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
+ * 1 not 0 etc) for the last write is reported.
+ *
+ * The high word (bits 16:31) is a bitmask of which engine classes
+ * are currently reading from the object. Multiple engines may be
+ * reading from the object simultaneously.
+ *
+ * The value of each engine class is the same as specified in the
+ * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+ * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc. Some
+ * hardware may have parallel execution engines, e.g. multiple
+ * media engines, which are
+ * mapped to the same class identifier and so are not separately
+ * reported for busyness.
+ *
+ * Caveat emptor:
+ * Only the boolean result of this query is reliable; that is whether
+ * the object is idle or busy. The report of which engines are busy
+ * should be only used as a heuristic.
+ */
+ __u32 busy;
+};
+
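/* Editor's sketch, not part of the header: splitting the busy dword
 * according to the layout documented above. */
#if 0
__u32 writer = busy.busy & 0xffff;   /* engine class + 1; 0 = no writer */
__u32 readers = busy.busy >> 16;     /* bitmask of reading engine classes */
int render_reads = !!(readers & (1 << I915_ENGINE_CLASS_RENDER));
#endif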
+/**
+ * I915_CACHING_NONE
+ *
+ * GPU access is not coherent with cpu caches. Default for machines without an
+ * LLC.
+ */
+#define I915_CACHING_NONE 0
+/**
+ * I915_CACHING_CACHED
+ *
+ * GPU access is coherent with cpu caches and furthermore the data is cached in
+ * last-level caches shared between cpu cores and the gpu GT. Default on
+ * machines with HAS_LLC.
+ */
+#define I915_CACHING_CACHED 1
+/**
+ * I915_CACHING_DISPLAY
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no special
+ * cache mode (like write-through or gfdt flushing) is available. The kernel
+ * automatically sets this mode when using a buffer as a scanout target.
+ * Userspace can manually set this mode to avoid a costly stall and clflush in
+ * the hotpath of drawing the first frame.
+ */
+#define I915_CACHING_DISPLAY 2
+
+struct drm_i915_gem_caching {
+ /**
+ * Handle of the buffer to set/get the caching level of. */
+ __u32 handle;
+
+ /**
+ * Caching level to apply or return value
+ *
+ * bits0-15 are for generic caching control (i.e. the above defined
+ * values). bits16-31 are reserved for platform-specific variations
+ * (e.g. l3$ caching on gen7). */
+ __u32 caching;
+};
+
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+#define I915_TILING_LAST I915_TILING_Y
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17 6
+#define I915_BIT_6_SWIZZLE_9_10_17 7
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ __u32 handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ __u32 tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ __u32 stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ __u32 swizzle_mode;
+};
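+
+/*
+ * Usage sketch (illustrative only; the stride value is a placeholder and
+ * must match the tiled row pitch of the buffer): requesting X tiling and
+ * reading back what the kernel actually chose.
+ *
+ *     struct drm_i915_gem_set_tiling arg = {
+ *         .handle = bo_handle,
+ *         .tiling_mode = I915_TILING_X,
+ *         .stride = 4096,
+ *     };
+ *     if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) == 0) {
+ *         // arg.tiling_mode may have been demoted to I915_TILING_NONE;
+ *         // arg.swizzle_mode tells the CPU mmap path how to address pages.
+ *     }
+ */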
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ __u32 handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ __u32 tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ __u32 swizzle_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping whilst bound.
+ */
+ __u32 phys_swizzle_mode;
+};
+
+struct drm_i915_gem_get_aperture {
+ /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+ __u64 aper_size;
+
+ /**
+ * Available space in the aperture used by i915_gem_execbuffer, in
+ * bytes
+ */
+ __u64 aper_available_size;
+};
+
+struct drm_i915_get_pipe_from_crtc_id {
+ /** ID of CRTC being requested **/
+ __u32 crtc_id;
+
+ /** pipe of requested CRTC **/
+ __u32 pipe;
+};
+
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+#define __I915_MADV_PURGED 2 /* internal state */
+
+struct drm_i915_gem_madvise {
+ /** Handle of the buffer to change the backing store advice */
+ __u32 handle;
+
+ /* Advice: either the buffer will be needed again in the near future,
+ * or won't be and could be discarded under memory pressure.
+ */
+ __u32 madv;
+
+ /** Whether the backing store still exists. */
+ __u32 retained;
+};
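+
+/*
+ * Usage sketch (illustrative only): marking an idle buffer as discardable
+ * and later checking whether its pages survived memory pressure.
+ *
+ *     struct drm_i915_gem_madvise arg = {
+ *         .handle = bo_handle,
+ *         .madv = I915_MADV_DONTNEED,
+ *     };
+ *     drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
+ *     // ... later, before reuse ...
+ *     arg.madv = I915_MADV_WILLNEED;
+ *     if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg) == 0 && !arg.retained)
+ *         ; // backing store was purged; contents must be reinitialised
+ */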
+
+/* flags */
+#define I915_OVERLAY_TYPE_MASK 0xff
+#define I915_OVERLAY_YUV_PLANAR 0x01
+#define I915_OVERLAY_YUV_PACKED 0x02
+#define I915_OVERLAY_RGB 0x03
+
+#define I915_OVERLAY_DEPTH_MASK 0xff00
+#define I915_OVERLAY_RGB24 0x1000
+#define I915_OVERLAY_RGB16 0x2000
+#define I915_OVERLAY_RGB15 0x3000
+#define I915_OVERLAY_YUV422 0x0100
+#define I915_OVERLAY_YUV411 0x0200
+#define I915_OVERLAY_YUV420 0x0300
+#define I915_OVERLAY_YUV410 0x0400
+
+#define I915_OVERLAY_SWAP_MASK 0xff0000
+#define I915_OVERLAY_NO_SWAP 0x000000
+#define I915_OVERLAY_UV_SWAP 0x010000
+#define I915_OVERLAY_Y_SWAP 0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
+
+#define I915_OVERLAY_FLAGS_MASK 0xff000000
+#define I915_OVERLAY_ENABLE 0x01000000
+
+struct drm_intel_overlay_put_image {
+ /* various flags and src format description */
+ __u32 flags;
+ /* source picture description */
+ __u32 bo_handle;
+ /* stride values and offsets are in bytes, buffer relative */
+ __u16 stride_Y; /* stride for packed formats */
+ __u16 stride_UV;
+ __u32 offset_Y; /* offset for packed formats */
+ __u32 offset_U;
+ __u32 offset_V;
+ /* in pixels */
+ __u16 src_width;
+ __u16 src_height;
+ /* to compensate the scaling factors for partially covered surfaces */
+ __u16 src_scan_width;
+ __u16 src_scan_height;
+ /* output crtc description */
+ __u32 crtc_id;
+ __u16 dst_x;
+ __u16 dst_y;
+ __u16 dst_width;
+ __u16 dst_height;
+};
+
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
+struct drm_intel_overlay_attrs {
+ __u32 flags;
+ __u32 color_key;
+ __s32 brightness;
+ __u32 contrast;
+ __u32 saturation;
+ __u32 gamma0;
+ __u32 gamma1;
+ __u32 gamma2;
+ __u32 gamma3;
+ __u32 gamma4;
+ __u32 gamma5;
+};
+
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple. Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent. All other pixels will
+ * be displayed on top of the primary plane. For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
+ * flags==0 to disable colorkeying.
+ */
+#define I915_SET_COLORKEY_DESTINATION (1<<1)
+#define I915_SET_COLORKEY_SOURCE (1<<2)
+struct drm_intel_sprite_colorkey {
+ __u32 plane_id;
+ __u32 min_value;
+ __u32 channel_mask;
+ __u32 max_value;
+ __u32 flags;
+};
+
+struct drm_i915_gem_wait {
+ /** Handle of BO we shall wait on */
+ __u32 bo_handle;
+ __u32 flags;
+ /** Number of nanoseconds to wait; returns the time remaining. */
+ __s64 timeout_ns;
+};
+
+struct drm_i915_gem_context_create {
+ __u32 ctx_id; /* output: id of new context*/
+ __u32 pad;
+};
+
+struct drm_i915_gem_context_create_ext {
+ __u32 ctx_id; /* output: id of new context*/
+ __u32 flags;
+#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
+#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
+ (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
+ __u64 extensions;
+};
+
+struct drm_i915_gem_context_param {
+ __u32 ctx_id;
+ __u32 size;
+ __u64 param;
+#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
+#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
+#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
+#define I915_CONTEXT_PARAM_BANNABLE 0x5
+#define I915_CONTEXT_PARAM_PRIORITY 0x6
+#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
+#define I915_CONTEXT_DEFAULT_PRIORITY 0
+#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
+ /*
+ * When using the following param, value should be a pointer to
+ * drm_i915_gem_context_param_sseu.
+ */
+#define I915_CONTEXT_PARAM_SSEU 0x7
+
+/*
+ * Not all clients may want to attempt automatic recovery of a context after
+ * a hang (for example, some clients may only submit very small incremental
+ * batches relying on known logical state of previous batches which will never
+ * recover correctly and each attempt will hang), and so would prefer that
+ * the context is forever banned instead.
+ *
+ * If set to false (0), after a reset, subsequent (and in flight) rendering
+ * from this context is discarded, and the client will need to create a new
+ * context to use instead.
+ *
+ * If set to true (1), the kernel will automatically attempt to recover the
+ * context by skipping the hanging batch and executing the next batch starting
+ * from the default context state (discarding the incomplete logical context
+ * state lost due to the reset).
+ *
+ * On creation, all new contexts are marked as recoverable.
+ */
+#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
+
+ /*
+ * The id of the associated virtual memory address space (ppGTT) of
+ * this context. Can be retrieved and passed to another context
+ * (on the same fd) for both to use the same ppGTT and so share
+ * address layouts, and avoid reloading the page tables on context
+ * switches between themselves.
+ *
+ * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
+ */
+#define I915_CONTEXT_PARAM_VM 0x9
+
+/*
+ * I915_CONTEXT_PARAM_ENGINES:
+ *
+ * Bind this context to operate on this subset of available engines. Henceforth,
+ * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
+ * an index into this array of engines; I915_EXEC_DEFAULT selects engine[0],
+ * and so on upwards. Slots 0...N are filled in using the specified (class, instance).
+ * Use
+ * engine_class: I915_ENGINE_CLASS_INVALID,
+ * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
+ * to specify a gap in the array that can be filled in later, e.g. by a
+ * virtual engine used for load balancing.
+ *
+ * Setting the number of engines bound to the context to 0, by passing a
+ * zero-sized argument, will revert to the default settings.
+ *
+ * See struct i915_context_param_engines.
+ *
+ * Extensions:
+ * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
+ * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ */
+#define I915_CONTEXT_PARAM_ENGINES 0xa
+
+/*
+ * I915_CONTEXT_PARAM_PERSISTENCE:
+ *
+ * Allow the context and active rendering to survive the process until
+ * completion. Persistence allows fire-and-forget clients to queue up a
+ * bunch of work, hand the output over to a display server and then quit.
+ * If the context is marked as not persistent, upon closing (either via
+ * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
+ * or process termination), the context and any outstanding requests will be
+ * cancelled (and exported fences for cancelled requests marked as -EIO).
+ *
+ * By default, new contexts allow persistence.
+ */
+#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
+
+/*
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ *
+ * If set to true (1), PAVP content protection is enabled.
+ * When enabled, the context is marked unrecoverable and may
+ * become invalid due to a PAVP teardown event or other error.
+ */
+#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
+/* Must be kept compact -- no holes and well documented */
+
+ __u64 value;
+};
+
+struct drm_i915_gem_object_param {
+ /* Object handle (0 for I915_GEM_CREATE_EXT_SETPARAM) */
+ __u32 handle;
+
+ /* Data pointer size */
+ __u32 size;
+
+/*
+ * I915_OBJECT_PARAM:
+ *
+ * Select object namespace for the param.
+ */
+#define I915_OBJECT_PARAM (1ull<<32)
+
+/*
+ * I915_PARAM_PROTECTED_CONTENT:
+ *
+ * If set to true (1), buffer contents are expected to be protected by
+ * PAVP encryption and require decryption for scan out and processing.
+ * Protected buffers can only be used in PAVP protected contexts.
+ * A protected buffer may become invalid as a result of PAVP teardown.
+ */
+#define I915_PARAM_PROTECTED_CONTENT 0x1
+
+ __u64 param;
+
+ /* Data value or pointer */
+ __u64 data;
+};
+
+struct drm_i915_gem_create_ext_setparam {
+ struct i915_user_extension base;
+ struct drm_i915_gem_object_param param;
+};
+
+/**
+ * Context SSEU programming
+ *
+ * It may be necessary for either functional or performance reasons to configure
+ * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
+ * Sub-slice/EU).
+ *
+ * This is done by specifying the SSEU configuration using the below
+ * @struct drm_i915_gem_context_param_sseu for every supported engine which
+ * userspace intends to use.
+ *
+ * Not all GPUs or engines support this functionality in which case an error
+ * code -ENODEV will be returned.
+ *
+ * Also, flexibility of possible SSEU configuration permutations varies between
+ * GPU generations and software imposed limitations. Requesting such a
+ * combination will return an error code of -EINVAL.
+ *
+ * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
+ * favour of a single global setting.
+ */
+struct drm_i915_gem_context_param_sseu {
+ /*
+ * Engine class & instance to be configured or queried.
+ */
+ struct i915_engine_class_instance engine;
+
+ /*
+ * Unknown flags must be cleared to zero.
+ */
+ __u32 flags;
+#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
+
+ /*
+ * Mask of slices to enable for the context. Valid values are a subset
+ * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+ */
+ __u64 slice_mask;
+
+ /*
+ * Mask of subslices to enable for the context. Valid values are a
+ * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
+ */
+ __u64 subslice_mask;
+
+ /*
+ * Minimum/Maximum number of EUs to enable per subslice for the
+ * context. min_eus_per_subslice must be less than or equal to
+ * max_eus_per_subslice.
+ */
+ __u16 min_eus_per_subslice;
+ __u16 max_eus_per_subslice;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 rsvd;
+};
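+
+/*
+ * Usage sketch (illustrative only; the masks shown are placeholders and
+ * valid values must come from I915_PARAM_SLICE_MASK /
+ * I915_PARAM_SUBSLICE_MASK): restricting the render engine of a context to
+ * a single slice.
+ *
+ *     struct drm_i915_gem_context_param_sseu sseu = {
+ *         .engine = { I915_ENGINE_CLASS_RENDER, 0 },
+ *         .slice_mask = 0x1,
+ *         .subslice_mask = 0x1,
+ *         .min_eus_per_subslice = 1,
+ *         .max_eus_per_subslice = 1,
+ *     };
+ *     struct drm_i915_gem_context_param arg = {
+ *         .ctx_id = ctx_id,
+ *         .param = I915_CONTEXT_PARAM_SSEU,
+ *         .size = sizeof(sseu),
+ *         .value = (uintptr_t)&sseu,
+ *     };
+ *     drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
+ */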
+
+/*
+ * i915_context_engines_load_balance:
+ *
+ * Enable load balancing across this set of engines.
+ *
+ * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
+ * used will proxy the execbuffer request onto one of the set of engines
+ * in such a way as to distribute the load evenly across the set.
+ *
+ * The set of engines must be compatible (e.g. the same HW class) as they
+ * will share the same logical GPU context and ring.
+ *
+ * To intermix rendering with the virtual engine and direct rendering onto
+ * the backing engines (bypassing the load balancing proxy), the context must
+ * be defined to use a single timeline for all engines.
+ */
+struct i915_context_engines_load_balance {
+ struct i915_user_extension base;
+
+ __u16 engine_index;
+ __u16 num_siblings;
+ __u32 flags; /* all undefined flags must be zero */
+
+ __u64 mbz64; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 num_siblings; \
+ __u32 flags; \
+ __u64 mbz64; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/*
+ * i915_context_engines_bond:
+ *
+ * Constructs bonded pairs for execution within a virtual engine.
+ *
+ * All engines are equal, but some are more equal than others. Given
+ * the distribution of resources in the HW, it may be preferable to run
+ * a request on a given subset of engines in parallel to a request on a
+ * specific engine. We enable this selection of engines within a virtual
+ * engine by specifying bonding pairs, for any given master engine we will
+ * only execute on one of the corresponding siblings within the virtual engine.
+ *
+ * To execute a request in parallel on the master engine and a sibling requires
+ * coordination with an I915_EXEC_FENCE_SUBMIT.
+ */
+struct i915_context_engines_bond {
+ struct i915_user_extension base;
+
+ struct i915_engine_class_instance master;
+
+ __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
+ __u16 num_bonds;
+
+ __u64 flags; /* all undefined flags must be zero */
+ __u64 mbz64[4]; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
+ struct i915_user_extension base; \
+ struct i915_engine_class_instance master; \
+ __u16 virtual_index; \
+ __u16 num_bonds; \
+ __u64 flags; \
+ __u64 mbz64[4]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+struct i915_context_param_engines {
+ __u64 extensions; /* linked chain of extension blocks, 0 terminates */
+#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
+#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
+ __u64 extensions; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
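+
+/*
+ * Usage sketch (illustrative only; ctx_id is assumed to come from an
+ * earlier context create): binding a context to two explicit engines with
+ * the helper macro above. Afterwards I915_EXEC_DEFAULT selects slot 0
+ * (render) and ring index 1 selects the copy engine.
+ *
+ *     I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
+ *         .extensions = 0,
+ *         .engines = {
+ *             { I915_ENGINE_CLASS_RENDER, 0 },
+ *             { I915_ENGINE_CLASS_COPY, 0 },
+ *         },
+ *     };
+ *     struct drm_i915_gem_context_param arg = {
+ *         .ctx_id = ctx_id,
+ *         .param = I915_CONTEXT_PARAM_ENGINES,
+ *         .size = sizeof(engines),
+ *         .value = (uintptr_t)&engines,
+ *     };
+ *     drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
+ */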
+
+struct drm_i915_gem_context_create_ext_setparam {
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+ struct i915_user_extension base;
+ struct drm_i915_gem_context_param param;
+};
+
+struct drm_i915_gem_context_create_ext_clone {
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
+ struct i915_user_extension base;
+ __u32 clone_id;
+ __u32 flags;
+#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
+#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
+#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
+#define I915_CONTEXT_CLONE_SSEU (1u << 3)
+#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
+#define I915_CONTEXT_CLONE_VM (1u << 5)
+#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
+ __u64 rsvd;
+};
+
+struct drm_i915_gem_context_destroy {
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+/*
+ * DRM_I915_GEM_VM_CREATE -
+ *
+ * Create a new virtual memory address space (ppGTT) for use within a context
+ * on the same file. Extensions can be provided to configure exactly how the
+ * address space is set up upon creation.
+ *
+ * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
+ * returned in the outparam @id.
+ *
+ * No flags are currently defined; all bits are reserved and must be zero.
+ *
+ * An extension chain may be provided, starting with @extensions, and terminated
+ * by the @next_extension being 0. Currently, no extensions are defined.
+ *
+ * DRM_I915_GEM_VM_DESTROY -
+ *
+ * Destroys a previously created VM id, specified in @id.
+ *
+ * No extensions or flags are allowed currently, and so must be zero.
+ */
+struct drm_i915_gem_vm_control {
+ __u64 extensions;
+ __u32 flags;
+ __u32 vm_id;
+};
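+
+/*
+ * Usage sketch (illustrative only; ctx_a/ctx_b are assumed pre-created
+ * contexts on the same fd): creating one ppGTT and sharing it between two
+ * contexts via I915_CONTEXT_PARAM_VM.
+ *
+ *     struct drm_i915_gem_vm_control vm = { 0 };
+ *     drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm); // fills vm.vm_id
+ *
+ *     struct drm_i915_gem_context_param arg = {
+ *         .param = I915_CONTEXT_PARAM_VM,
+ *         .value = vm.vm_id,
+ *     };
+ *     arg.ctx_id = ctx_a;
+ *     drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
+ *     arg.ctx_id = ctx_b;
+ *     drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
+ */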
+
+/*
+ * struct pxp_sm_query_pxp_tag - Params to query the PXP tag of a specified
+ * session id, and whether the session is alive, from the PXP state machine.
+ */
+struct pxp_sm_query_pxp_tag {
+ __u32 session_is_alive;
+ __u32 pxp_tag; /* in - Session ID, out pxp tag */
+};
+
+/*
+ * struct pxp_set_session_status_params - Params to reserve, set or destroy
+ * the session from the PXP state machine.
+ */
+struct pxp_set_session_status_params {
+ __u32 pxp_tag; /* in [optional], for Arbitrator session, out pxp tag */
+ __u32 session_type; /* in, session type */
+ __u32 session_mode; /* in, session mode */
+ __u32 req_session_state; /* in, new session state */
+};
+
+/*
+ * struct pxp_tee_io_message_params - Params to send/receive message to/from TEE.
+ */
+struct pxp_tee_io_message_params {
+ __u8 *msg_in; /* in - message input */
+ __u32 msg_in_size; /* in - message input size */
+ __u8 *msg_out; /* in - message output buffer */
+ __u32 msg_out_size; /* out - message output size from TEE */
+ __u32 msg_out_buf_size; /* in - message output buffer size */
+};
+
+/*
+ * struct pxp_info - Params for PXP operation.
+ */
+struct pxp_info {
+ __u32 action; /* in - specified action of this operation */
+ __u32 sm_status; /* out - status output for this operation */
+
+ union {
+ /* in - action params to query PXP tag */
+ struct pxp_sm_query_pxp_tag query_pxp_tag;
+ /* in - action params to set the PXP session state */
+ struct pxp_set_session_status_params set_session_status;
+ /* in - action params to send TEE commands */
+ struct pxp_tee_io_message_params tee_io_message;
+
+ /* in - action params to set user space context */
+ __u32 set_user_ctx;
+ };
+} __attribute__((packed));
+
+/*
+ * DRM_I915_PXP_OPS -
+ *
+ * PXP is an i915 component that helps user space establish the hardware-
+ * protected session and manage the status of each alive software session,
+ * as well as the life cycle of each session.
+ *
+ * This ioctl allows the user space driver to create, set, and destroy each
+ * session. It also provides the communication channel to the TEE (Trusted
+ * Execution Environment) for the protected hardware session creation.
+ */
+struct drm_i915_pxp_ops {
+ /* in - user space pointer to struct pxp_info */
+ struct pxp_info *info_ptr;
+
+ /* in - memory size that info_ptr points to */
+ __u32 info_size;
+};
+
+struct drm_i915_reg_read {
+ /*
+ * Register offset.
+ * For 64-bit wide registers where the upper 32 bits don't immediately
+ * follow the lower 32 bits, the offset of the lower 32 bits must
+ * be specified.
+ */
+ __u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
+ __u64 val; /* Return value */
+};
+
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if read using the default
+ * single-instruction 8-byte read; to work around that, pass the
+ * I915_REG_READ_8B_WA flag in the offset field.
+ *
+ */
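+
+/*
+ * Usage sketch (illustrative only): sampling the gen7+ render engine
+ * timestamp with the 8-byte workaround flag folded into the offset field.
+ *
+ *     struct drm_i915_reg_read rr = {
+ *         .offset = 0x2358 | I915_REG_READ_8B_WA,
+ *     };
+ *     if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
+ *         ; // rr.val now holds the 64-bit timestamp
+ */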
+
+struct drm_i915_reset_stats {
+ __u32 ctx_id;
+ __u32 flags;
+
+ /* All resets since boot/module reload, for all contexts */
+ __u32 reset_count;
+
+ /* Number of batches lost when active in GPU, for this context */
+ __u32 batch_active;
+
+ /* Number of batches lost pending for execution, for this context */
+ __u32 batch_pending;
+
+ __u32 pad;
+};
+
+struct drm_i915_gem_userptr {
+ __u64 user_ptr;
+ __u64 user_size;
+ __u32 flags;
+#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+};
+
+enum drm_i915_oa_format {
+ I915_OA_FORMAT_A13 = 1, /* HSW only */
+ I915_OA_FORMAT_A29, /* HSW only */
+ I915_OA_FORMAT_A13_B8_C8, /* HSW only */
+ I915_OA_FORMAT_B4_C8, /* HSW only */
+ I915_OA_FORMAT_A45_B8_C8, /* HSW only */
+ I915_OA_FORMAT_B4_C8_A16, /* HSW only */
+ I915_OA_FORMAT_C4_B8, /* HSW+ */
+
+ /* Gen8+ */
+ I915_OA_FORMAT_A12,
+ I915_OA_FORMAT_A12_B8_C8,
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8,
+
+ I915_OA_FORMAT_MAX /* non-ABI */
+};
+
+enum drm_i915_perf_property_id {
+ /**
+ * Open the stream for a specific context handle (as used with
+ * execbuffer2). A stream opened for a specific context this way
+ * won't typically require root privileges.
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_CTX_HANDLE = 1,
+
+ /**
+ * A value of 1 requests the inclusion of raw OA unit reports as
+ * part of stream samples.
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_SAMPLE_OA,
+
+ /**
+ * The value specifies which set of OA unit metrics should be
+ * configured, defining the contents of any OA unit reports.
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_OA_METRICS_SET,
+
+ /**
+ * The value specifies the size and layout of OA unit reports.
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_OA_FORMAT,
+
+ /**
+ * Specifying this property implicitly requests periodic OA unit
+ * sampling and (at least on Haswell) the sampling frequency is derived
+ * from this exponent as follows:
+ *
+ * 80ns * 2^(period_exponent + 1)
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_OA_EXPONENT,
+
+ /**
+ * Specifying this property is only valid when specifying a context to
+ * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
+ * will hold off preemption of the particular context we want to gather
+ * performance data about. The execbuf2 submissions must include a
+ * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
+ *
+ * This property is available in perf revision 3.
+ */
+ DRM_I915_PERF_PROP_HOLD_PREEMPTION,
+
+ DRM_I915_PERF_PROP_MAX /* non-ABI */
+};
+
+struct drm_i915_perf_open_param {
+ __u32 flags;
+#define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
+#define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
+#define I915_PERF_FLAG_DISABLED (1<<2)
+
+ /** The number of u64 (id, value) pairs */
+ __u32 num_properties;
+
+ /**
+ * Pointer to array of u64 (id, value) pairs configuring the stream
+ * to open.
+ */
+ __u64 properties_ptr;
+};
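+
+/*
+ * Usage sketch (illustrative only; metrics_set_id is a placeholder for a
+ * configuration id obtained from sysfs or DRM_IOCTL_I915_PERF_ADD_CONFIG):
+ * opening a periodic OA stream. On success the ioctl returns a new stream fd.
+ *
+ *     __u64 props[] = {
+ *         DRM_I915_PERF_PROP_SAMPLE_OA, 1,
+ *         DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
+ *         DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
+ *         DRM_I915_PERF_PROP_OA_EXPONENT, 16,
+ *     };
+ *     struct drm_i915_perf_open_param param = {
+ *         .flags = I915_PERF_FLAG_FD_CLOEXEC,
+ *         .num_properties = sizeof(props) / (2 * sizeof(__u64)),
+ *         .properties_ptr = (uintptr_t)props,
+ *     };
+ *     int stream_fd = drmIoctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
+ */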
+
+/**
+ * Enable data capture for a stream that was either opened in a disabled state
+ * via I915_PERF_FLAG_DISABLED or was later disabled via
+ * I915_PERF_IOCTL_DISABLE.
+ *
+ * It is intended to be cheaper to disable and enable a stream than it may be
+ * to close and re-open a stream with the same configuration.
+ *
+ * It's undefined whether any pending data for the stream will be lost.
+ *
+ * This ioctl is available in perf revision 1.
+ */
+#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
+
+/**
+ * Disable data capture for a stream.
+ *
+ * It is an error to try to read a stream that is disabled.
+ *
+ * This ioctl is available in perf revision 1.
+ */
+#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
+
+/**
+ * Change metrics_set captured by a stream.
+ *
+ * If the stream is bound to a specific context, the configuration change
+ * will be performed inline with that context such that it takes effect before
+ * the next execbuf submission.
+ *
+ * Returns the previously bound metrics set id, or a negative error code.
+ *
+ * This ioctl is available in perf revision 2.
+ */
+#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
+
+/**
+ * Common to all i915 perf records
+ */
+struct drm_i915_perf_record_header {
+ __u32 type;
+ __u16 pad;
+ __u16 size;
+};
+
+enum drm_i915_perf_record_type {
+
+ /**
+ * Samples are the workhorse record type whose contents are extensible
+ * and defined when opening an i915 perf stream based on the given
+ * properties.
+ *
+ * Boolean properties following the naming convention
+ * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
+ * every sample.
+ *
+ * The order of these sample properties given by userspace has no
+ * effect on the ordering of data within a sample. The order is
+ * documented here.
+ *
+ * struct {
+ * struct drm_i915_perf_record_header header;
+ *
+ * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
+ * };
+ */
+ DRM_I915_PERF_RECORD_SAMPLE = 1,
+
+ /*
+ * Indicates that one or more OA reports were not written by the
+ * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
+ * command collides with periodic sampling - which would be more likely
+ * at higher sampling frequencies.
+ */
+ DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
+
+ /**
+ * An error occurred that resulted in all pending OA reports being lost.
+ */
+ DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
+
+ DRM_I915_PERF_RECORD_MAX /* non-ABI */
+};
+
+/**
+ * Structure to upload perf dynamic configuration into the kernel.
+ */
+struct drm_i915_perf_oa_config {
+ /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+ char uuid[36];
+
+ __u32 n_mux_regs;
+ __u32 n_boolean_regs;
+ __u32 n_flex_regs;
+
+ /*
+ * These fields are pointers to tuples of u32 values (register address,
+ * value). For example, the expected length of the buffer pointed to by
+ * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ */
+ __u64 mux_regs_ptr;
+ __u64 boolean_regs_ptr;
+ __u64 flex_regs_ptr;
+};
+
+struct drm_i915_query_item {
+ __u64 query_id;
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
+/* Must be kept compact -- no holes and well documented */
+
+ /*
+ * When set to zero by userspace, this is filled with the size of the
+ * data to be written at the data_ptr pointer. The kernel sets this
+ * value to a negative value to signal an error on a particular query
+ * item.
+ */
+ __s32 length;
+
+ /*
+ * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ *
+ * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * following :
+ * - DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ */
+ __u32 flags;
+#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
+
+ /*
+ * Data will be written at the location pointed by data_ptr when the
+ * value of length matches the length of the data to be written by the
+ * kernel.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_query {
+ __u32 num_items;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * This points to an array of num_items drm_i915_query_item structures.
+ */
+ __u64 items_ptr;
+};
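+
+/*
+ * Usage sketch (illustrative only; error checking omitted): the usual
+ * two-pass pattern, first asking the kernel for the required size, then
+ * allocating and fetching the data.
+ *
+ *     struct drm_i915_query_item item = {
+ *         .query_id = DRM_I915_QUERY_ENGINE_INFO,
+ *     };
+ *     struct drm_i915_query q = {
+ *         .num_items = 1,
+ *         .items_ptr = (uintptr_t)&item,
+ *     };
+ *     drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q); // fills item.length
+ *     struct drm_i915_query_engine_info *info = calloc(1, item.length);
+ *     item.data_ptr = (uintptr_t)info;
+ *     drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q); // writes the data
+ */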
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
+ *
+ * data: contains the 3 pieces of information :
+ *
+ * - the slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the following
+ * formula :
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * - the subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. Gen12 has dual-subslices, which are
+ * similar to two gen11 subslices. For gen12, this array represents dual-
+ * subslices. The availability of subslice Y in slice X can be queried
+ * with the following formula :
+ *
+ * (data[subslice_offset +
+ * X * subslice_stride +
+ * Y / 8] >> (Y % 8)) & 1
+ *
+ * - the EU mask for each subslice in each slice with one bit per EU telling
+ * whether an EU is available. The availability of EU Z in subslice Y in
+ * slice X can be queried with the following formula :
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8] >> (Z % 8)) & 1
+ */
+struct drm_i915_query_topology_info {
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u16 flags;
+
+ __u16 max_slices;
+ __u16 max_subslices;
+ __u16 max_eus_per_subslice;
+
+ /*
+ * Offset in data[] at which the subslice masks are stored.
+ */
+ __u16 subslice_offset;
+
+ /*
+ * Stride at which each of the subslice masks for each slice are
+ * stored.
+ */
+ __u16 subslice_stride;
+
+ /*
+ * Offset in data[] at which the EU masks are stored.
+ */
+ __u16 eu_offset;
+
+ /*
+ * Stride at which each of the EU masks for each subslice are stored.
+ */
+ __u16 eu_stride;
+
+ __u8 data[];
+};
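+
+/*
+ * Usage sketch (illustrative only): helpers implementing the three formulas
+ * above against the returned blob.
+ *
+ *     static int slice_avail(const struct drm_i915_query_topology_info *ti,
+ *                            int s)
+ *     {
+ *         return (ti->data[s / 8] >> (s % 8)) & 1;
+ *     }
+ *     static int subslice_avail(const struct drm_i915_query_topology_info *ti,
+ *                               int s, int ss)
+ *     {
+ *         return (ti->data[ti->subslice_offset + s * ti->subslice_stride +
+ *                          ss / 8] >> (ss % 8)) & 1;
+ *     }
+ *     static int eu_avail(const struct drm_i915_query_topology_info *ti,
+ *                         int s, int ss, int eu)
+ *     {
+ *         return (ti->data[ti->eu_offset +
+ *                          (s * ti->max_subslices + ss) * ti->eu_stride +
+ *                          eu / 8] >> (eu % 8)) & 1;
+ *     }
+ */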
+
+/**
+ * struct drm_i915_engine_info
+ *
+ * Describes one engine and its capabilities as known to the driver.
+ */
+struct drm_i915_engine_info {
+ /** Engine class and instance. */
+ struct i915_engine_class_instance engine;
+
+ /** Reserved field. */
+ __u32 rsvd0;
+
+ /** Engine flags. */
+ __u64 flags;
+
+ /** Capabilities of this engine. */
+ __u64 capabilities;
+#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
+#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
+
+ /** Reserved fields. */
+ __u64 rsvd1[4];
+};
+
+/**
+ * struct drm_i915_query_engine_info
+ *
+ * Engine info query enumerates all engines known to the driver by filling in
+ * an array of struct drm_i915_engine_info structures.
+ */
+struct drm_i915_query_engine_info {
+ /** Number of struct drm_i915_engine_info structs following. */
+ __u32 num_engines;
+
+ /** MBZ */
+ __u32 rsvd[3];
+
+ /** Marker for drm_i915_engine_info structures. */
+ struct drm_i915_engine_info engines[];
+};
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+ */
+struct drm_i915_query_perf_config {
+ union {
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
+ * this field to the number of configurations available.
+ */
+ __u64 n_configs;
+
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
+ * i915 will use the value in this field as configuration
+ * identifier to decide what data to write into config_ptr.
+ */
+ __u64 config;
+
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
+ * i915 will use the value in this field as configuration
+ * identifier to decide what data to write into config_ptr.
+ *
+ * String formatted like "%08x-%04x-%04x-%04x-%012x"
+ */
+ char uuid[36];
+ };
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
+ * write an array of __u64 of configuration identifiers.
+ *
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
+ * write a struct drm_i915_perf_oa_config. If the following fields of
+ * drm_i915_perf_oa_config are not set to 0, i915 will write into
+ * the associated pointers the values submitted when the
+ * configuration was created:
+ *
+ * - n_mux_regs
+ * - n_boolean_regs
+ * - n_flex_regs
+ */
+ __u8 data[];
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_I915_DRM_H_ */
diff --git a/chromium/third_party/minigbm/src/virgl_hw.h b/chromium/third_party/minigbm/src/external/virgl_hw.h
index 145780bf83e..7f4a63ffc81 100644
--- a/chromium/third_party/minigbm/src/virgl_hw.h
+++ b/chromium/third_party/minigbm/src/external/virgl_hw.h
@@ -69,7 +69,7 @@ enum virgl_formats {
VIRGL_FORMAT_R8_UNORM = 64,
VIRGL_FORMAT_R8G8_UNORM = 65,
-
+ VIRGL_FORMAT_R8G8B8_UNORM = 66,
VIRGL_FORMAT_R8G8B8A8_UNORM = 67,
VIRGL_FORMAT_R8_SNORM = 74,
@@ -290,6 +290,18 @@ enum virgl_formats {
#define VIRGL_BIND_LINEAR (1 << 22)
+#define VIRGL_BIND_SHARED_SUBFLAGS (0xff << 24)
+
+#define VIRGL_BIND_MINIGBM_CAMERA_WRITE (1 << 24)
+#define VIRGL_BIND_MINIGBM_CAMERA_READ (1 << 25)
+#define VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER (1 << 26)
+#define VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER (1 << 27)
+#define VIRGL_BIND_MINIGBM_SW_READ_OFTEN (1 << 28)
+#define VIRGL_BIND_MINIGBM_SW_READ_RARELY (1 << 29)
+#define VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN (1 << 30)
+#define VIRGL_BIND_MINIGBM_SW_WRITE_RARELY (1 << 31)
+#define VIRGL_BIND_MINIGBM_PROTECTED (0xf << 28) // Mutually exclusive with SW_ flags
+
struct virgl_caps_bool_set1 {
unsigned indep_blend_enable:1;
unsigned indep_blend_func:1;
diff --git a/chromium/third_party/minigbm/src/external/virgl_protocol.h b/chromium/third_party/minigbm/src/external/virgl_protocol.h
new file mode 100644
index 00000000000..d9884dfbcdb
--- /dev/null
+++ b/chromium/third_party/minigbm/src/external/virgl_protocol.h
@@ -0,0 +1,632 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRGL_PROTOCOL_H
+#define VIRGL_PROTOCOL_H
+
+#define VIRGL_QUERY_STATE_NEW 0
+#define VIRGL_QUERY_STATE_DONE 1
+#define VIRGL_QUERY_STATE_WAIT_HOST 2
+
+struct virgl_host_query_state {
+ uint32_t query_state;
+ uint32_t result_size;
+ uint64_t result;
+};
+
+enum virgl_object_type {
+ VIRGL_OBJECT_NULL,
+ VIRGL_OBJECT_BLEND,
+ VIRGL_OBJECT_RASTERIZER,
+ VIRGL_OBJECT_DSA,
+ VIRGL_OBJECT_SHADER,
+ VIRGL_OBJECT_VERTEX_ELEMENTS,
+ VIRGL_OBJECT_SAMPLER_VIEW,
+ VIRGL_OBJECT_SAMPLER_STATE,
+ VIRGL_OBJECT_SURFACE,
+ VIRGL_OBJECT_QUERY,
+ VIRGL_OBJECT_STREAMOUT_TARGET,
+ VIRGL_MAX_OBJECTS,
+};
+
+/* context cmds to be encoded in the command stream */
+enum virgl_context_cmd {
+ VIRGL_CCMD_NOP = 0,
+ VIRGL_CCMD_CREATE_OBJECT = 1,
+ VIRGL_CCMD_BIND_OBJECT,
+ VIRGL_CCMD_DESTROY_OBJECT,
+ VIRGL_CCMD_SET_VIEWPORT_STATE,
+ VIRGL_CCMD_SET_FRAMEBUFFER_STATE,
+ VIRGL_CCMD_SET_VERTEX_BUFFERS,
+ VIRGL_CCMD_CLEAR,
+ VIRGL_CCMD_DRAW_VBO,
+ VIRGL_CCMD_RESOURCE_INLINE_WRITE,
+ VIRGL_CCMD_SET_SAMPLER_VIEWS,
+ VIRGL_CCMD_SET_INDEX_BUFFER,
+ VIRGL_CCMD_SET_CONSTANT_BUFFER,
+ VIRGL_CCMD_SET_STENCIL_REF,
+ VIRGL_CCMD_SET_BLEND_COLOR,
+ VIRGL_CCMD_SET_SCISSOR_STATE,
+ VIRGL_CCMD_BLIT,
+ VIRGL_CCMD_RESOURCE_COPY_REGION,
+ VIRGL_CCMD_BIND_SAMPLER_STATES,
+ VIRGL_CCMD_BEGIN_QUERY,
+ VIRGL_CCMD_END_QUERY,
+ VIRGL_CCMD_GET_QUERY_RESULT,
+ VIRGL_CCMD_SET_POLYGON_STIPPLE,
+ VIRGL_CCMD_SET_CLIP_STATE,
+ VIRGL_CCMD_SET_SAMPLE_MASK,
+ VIRGL_CCMD_SET_STREAMOUT_TARGETS,
+ VIRGL_CCMD_SET_RENDER_CONDITION,
+ VIRGL_CCMD_SET_UNIFORM_BUFFER,
+
+ VIRGL_CCMD_SET_SUB_CTX,
+ VIRGL_CCMD_CREATE_SUB_CTX,
+ VIRGL_CCMD_DESTROY_SUB_CTX,
+ VIRGL_CCMD_BIND_SHADER,
+ VIRGL_CCMD_SET_TESS_STATE,
+ VIRGL_CCMD_SET_MIN_SAMPLES,
+ VIRGL_CCMD_SET_SHADER_BUFFERS,
+ VIRGL_CCMD_SET_SHADER_IMAGES,
+ VIRGL_CCMD_MEMORY_BARRIER,
+ VIRGL_CCMD_LAUNCH_GRID,
+ VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH,
+ VIRGL_CCMD_TEXTURE_BARRIER,
+ VIRGL_CCMD_SET_ATOMIC_BUFFERS,
+ VIRGL_CCMD_SET_DEBUG_FLAGS,
+ VIRGL_CCMD_GET_QUERY_RESULT_QBO,
+ VIRGL_CCMD_TRANSFER3D,
+ VIRGL_CCMD_END_TRANSFERS,
+ VIRGL_CCMD_COPY_TRANSFER3D,
+ VIRGL_CCMD_SET_TWEAKS,
+ VIRGL_CCMD_CLEAR_TEXTURE,
+ VIRGL_CCMD_PIPE_RESOURCE_CREATE,
+};
+
+/*
+ 8-bit cmd headers
+ 8-bit object type
+ 16-bit length
+*/
+
+#define VIRGL_CMD0(cmd, obj, len) ((cmd) | ((obj) << 8) | ((len) << 16))
+#define VIRGL_CMD0_MAX_DWORDS (((1ULL << 16) - 1) / 4) * 4
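+
+/*
+ * Usage sketch (illustrative only): every command starts with one header
+ * dword built by VIRGL_CMD0, followed by its payload dwords; e.g. a clear
+ * command is a header plus VIRGL_OBJ_CLEAR_SIZE dwords.
+ *
+ *     uint32_t buf[VIRGL_OBJ_CLEAR_SIZE + 1];
+ *     buf[0] = VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE);
+ *     // buf[VIRGL_OBJ_CLEAR_BUFFERS..VIRGL_OBJ_CLEAR_STENCIL] carry the args
+ */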
+
+/* hw specification */
+#define VIRGL_MAX_COLOR_BUFS 8
+#define VIRGL_MAX_CLIP_PLANES 8
+
+#define VIRGL_OBJ_CREATE_HEADER 0
+#define VIRGL_OBJ_CREATE_HANDLE 1
+
+#define VIRGL_OBJ_BIND_HEADER 0
+#define VIRGL_OBJ_BIND_HANDLE 1
+
+#define VIRGL_OBJ_DESTROY_HANDLE 1
+
+/* some of these defines are a specification - not used in the code */
+/* bit offsets for blend state object */
+#define VIRGL_OBJ_BLEND_SIZE (VIRGL_MAX_COLOR_BUFS + 3)
+#define VIRGL_OBJ_BLEND_HANDLE 1
+#define VIRGL_OBJ_BLEND_S0 2
+#define VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(x) (((x) & 0x1) << 1)
+#define VIRGL_OBJ_BLEND_S0_DITHER(x) (((x) & 0x1) << 2)
+#define VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(x) (((x) & 0x1) << 3)
+#define VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(x) (((x) & 0x1) << 4)
+#define VIRGL_OBJ_BLEND_S1 3
+#define VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(x) (((x) & 0xf) << 0)
+/* repeated once per number of cbufs */
+
+#define VIRGL_OBJ_BLEND_S2(cbuf) (4 + (cbuf))
+#define VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(x) (((x) & 0x7) << 1)
+#define VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(x) (((x) & 0x1f) << 4)
+#define VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(x) (((x) & 0x1f) << 9)
+#define VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(x) (((x) & 0x7) << 14)
+#define VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(x) (((x) & 0x1f) << 17)
+#define VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(x) (((x) & 0x1f) << 22)
+#define VIRGL_OBJ_BLEND_S2_RT_COLORMASK(x) (((x) & 0xf) << 27)
+
+/* bit offsets for DSA state */
+#define VIRGL_OBJ_DSA_SIZE 5
+#define VIRGL_OBJ_DSA_HANDLE 1
+#define VIRGL_OBJ_DSA_S0 2
+#define VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(x) (((x) & 0x1) << 1)
+#define VIRGL_OBJ_DSA_S0_DEPTH_FUNC(x) (((x) & 0x7) << 2)
+#define VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(x) (((x) & 0x1) << 8)
+#define VIRGL_OBJ_DSA_S0_ALPHA_FUNC(x) (((x) & 0x7) << 9)
+#define VIRGL_OBJ_DSA_S1 3
+#define VIRGL_OBJ_DSA_S2 4
+#define VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_DSA_S1_STENCIL_FUNC(x) (((x) & 0x7) << 1)
+#define VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(x) (((x) & 0x7) << 4)
+#define VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(x) (((x) & 0x7) << 7)
+#define VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(x) (((x) & 0x7) << 10)
+#define VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(x) (((x) & 0xff) << 13)
+#define VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(x) (((x) & 0xff) << 21)
+#define VIRGL_OBJ_DSA_ALPHA_REF 5
+
+/* offsets for rasterizer state */
+#define VIRGL_OBJ_RS_SIZE 9
+#define VIRGL_OBJ_RS_HANDLE 1
+#define VIRGL_OBJ_RS_S0 2
+#define VIRGL_OBJ_RS_S0_FLATSHADE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_RS_S0_DEPTH_CLIP(x) (((x) & 0x1) << 1)
+#define VIRGL_OBJ_RS_S0_CLIP_HALFZ(x) (((x) & 0x1) << 2)
+#define VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(x) (((x) & 0x1) << 3)
+#define VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(x) (((x) & 0x1) << 4)
+#define VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(x) (((x) & 0x1) << 5)
+#define VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(x) (((x) & 0x1) << 6)
+#define VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(x) (((x) & 0x1) << 7)
+#define VIRGL_OBJ_RS_S0_CULL_FACE(x) (((x) & 0x3) << 8)
+#define VIRGL_OBJ_RS_S0_FILL_FRONT(x) (((x) & 0x3) << 10)
+#define VIRGL_OBJ_RS_S0_FILL_BACK(x) (((x) & 0x3) << 12)
+#define VIRGL_OBJ_RS_S0_SCISSOR(x) (((x) & 0x1) << 14)
+#define VIRGL_OBJ_RS_S0_FRONT_CCW(x) (((x) & 0x1) << 15)
+#define VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(x) (((x) & 0x1) << 16)
+#define VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(x) (((x) & 0x1) << 17)
+#define VIRGL_OBJ_RS_S0_OFFSET_LINE(x) (((x) & 0x1) << 18)
+#define VIRGL_OBJ_RS_S0_OFFSET_POINT(x) (((x) & 0x1) << 19)
+#define VIRGL_OBJ_RS_S0_OFFSET_TRI(x) (((x) & 0x1) << 20)
+#define VIRGL_OBJ_RS_S0_POLY_SMOOTH(x) (((x) & 0x1) << 21)
+#define VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(x) (((x) & 0x1) << 22)
+#define VIRGL_OBJ_RS_S0_POINT_SMOOTH(x) (((x) & 0x1) << 23)
+#define VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(x) (((x) & 0x1) << 24)
+#define VIRGL_OBJ_RS_S0_MULTISAMPLE(x) (((x) & 0x1) << 25)
+#define VIRGL_OBJ_RS_S0_LINE_SMOOTH(x) (((x) & 0x1) << 26)
+#define VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(x) (((x) & 0x1) << 27)
+#define VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(x) (((x) & 0x1) << 28)
+#define VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(x) (((x) & 0x1) << 29)
+#define VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(x) (((x) & 0x1) << 30)
+#define VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(x) (((x) & 0x1) << 31)
+
+#define VIRGL_OBJ_RS_POINT_SIZE 3
+#define VIRGL_OBJ_RS_SPRITE_COORD_ENABLE 4
+#define VIRGL_OBJ_RS_S3 5
+
+#define VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(x) (((x) & 0xffff) << 0)
+#define VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(x) (((x) & 0xff) << 16)
+#define VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(x) (((x) & 0xff) << 24)
+#define VIRGL_OBJ_RS_LINE_WIDTH 6
+#define VIRGL_OBJ_RS_OFFSET_UNITS 7
+#define VIRGL_OBJ_RS_OFFSET_SCALE 8
+#define VIRGL_OBJ_RS_OFFSET_CLAMP 9
+
+#define VIRGL_OBJ_CLEAR_SIZE 8
+#define VIRGL_OBJ_CLEAR_BUFFERS 1
+#define VIRGL_OBJ_CLEAR_COLOR_0 2 /* color is 4 * u32/f32/i32 */
+#define VIRGL_OBJ_CLEAR_COLOR_1 3
+#define VIRGL_OBJ_CLEAR_COLOR_2 4
+#define VIRGL_OBJ_CLEAR_COLOR_3 5
+#define VIRGL_OBJ_CLEAR_DEPTH_0 6 /* depth is a double precision float */
+#define VIRGL_OBJ_CLEAR_DEPTH_1 7
+#define VIRGL_OBJ_CLEAR_STENCIL 8
+
+/* shader object */
+#define VIRGL_OBJ_SHADER_HDR_SIZE(nso) (5 + ((nso) ? (2 * nso) + 4 : 0))
+#define VIRGL_OBJ_SHADER_HANDLE 1
+#define VIRGL_OBJ_SHADER_TYPE 2
+#define VIRGL_OBJ_SHADER_OFFSET 3
+#define VIRGL_OBJ_SHADER_OFFSET_VAL(x) (((x) & 0x7fffffff) << 0)
+/* start contains full length in VAL - also implies continuations */
+/* continuation contains offset in VAL */
+#define VIRGL_OBJ_SHADER_OFFSET_CONT (0x1u << 31)
+#define VIRGL_OBJ_SHADER_NUM_TOKENS 4
+#define VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS 5
+#define VIRGL_OBJ_SHADER_SO_STRIDE(x) (6 + (x))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT0(x) (10 + (x * 2))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(x) (((x) & 0xff) << 0)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(x) (((x) & 0x3) << 8)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(x) (((x) & 0x7) << 10)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(x) (((x) & 0x7) << 13)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(x) (((x) & 0xffff) << 16)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT0_SO(x) (11 + (x * 2))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_STREAM(x) (((x) & 0x03) << 0)
+
+/* viewport state */
+#define VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports) ((6 * num_viewports) + 1)
+#define VIRGL_SET_VIEWPORT_START_SLOT 1
+#define VIRGL_SET_VIEWPORT_STATE_SCALE_0(x) (2 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_SCALE_1(x) (3 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_SCALE_2(x) (4 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_TRANSLATE_0(x) (5 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_TRANSLATE_1(x) (6 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_TRANSLATE_2(x) (7 + (x * 6))
+
+/* framebuffer state */
+#define VIRGL_SET_FRAMEBUFFER_STATE_SIZE(nr_cbufs) (nr_cbufs + 2)
+#define VIRGL_SET_FRAMEBUFFER_STATE_NR_CBUFS 1
+#define VIRGL_SET_FRAMEBUFFER_STATE_NR_ZSURF_HANDLE 2
+#define VIRGL_SET_FRAMEBUFFER_STATE_CBUF_HANDLE(x) ((x) + 3)
+
+/* vertex elements object */
+#define VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements) (((num_elements) * 4) + 1)
+#define VIRGL_OBJ_VERTEX_ELEMENTS_HANDLE 1
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_OFFSET(x) (((x) * 4) + 2) /* repeated per VE */
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_INSTANCE_DIVISOR(x) (((x) * 4) + 3)
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_VERTEX_BUFFER_INDEX(x) (((x) * 4) + 4)
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_FORMAT(x) (((x) * 4) + 5)
+
+/* vertex buffers */
+#define VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers) ((num_buffers) * 3)
+#define VIRGL_SET_VERTEX_BUFFER_STRIDE(x) (((x) * 3) + 1)
+#define VIRGL_SET_VERTEX_BUFFER_OFFSET(x) (((x) * 3) + 2)
+#define VIRGL_SET_VERTEX_BUFFER_HANDLE(x) (((x) * 3) + 3)
+
+/* index buffer */
+#define VIRGL_SET_INDEX_BUFFER_SIZE(ib) (((ib) ? 2 : 0) + 1)
+#define VIRGL_SET_INDEX_BUFFER_HANDLE 1
+#define VIRGL_SET_INDEX_BUFFER_INDEX_SIZE 2 /* only if sending an IB handle */
+#define VIRGL_SET_INDEX_BUFFER_OFFSET 3 /* only if sending an IB handle */
+
+/* constant buffer */
+#define VIRGL_SET_CONSTANT_BUFFER_SHADER_TYPE 1
+#define VIRGL_SET_CONSTANT_BUFFER_INDEX 2
+#define VIRGL_SET_CONSTANT_BUFFER_DATA_START 3
+
+#define VIRGL_SET_UNIFORM_BUFFER_SIZE 5
+#define VIRGL_SET_UNIFORM_BUFFER_SHADER_TYPE 1
+#define VIRGL_SET_UNIFORM_BUFFER_INDEX 2
+#define VIRGL_SET_UNIFORM_BUFFER_OFFSET 3
+#define VIRGL_SET_UNIFORM_BUFFER_LENGTH 4
+#define VIRGL_SET_UNIFORM_BUFFER_RES_HANDLE 5
+
+/* draw VBO */
+#define VIRGL_DRAW_VBO_SIZE 12
+#define VIRGL_DRAW_VBO_SIZE_TESS 14
+#define VIRGL_DRAW_VBO_SIZE_INDIRECT 20
+#define VIRGL_DRAW_VBO_START 1
+#define VIRGL_DRAW_VBO_COUNT 2
+#define VIRGL_DRAW_VBO_MODE 3
+#define VIRGL_DRAW_VBO_INDEXED 4
+#define VIRGL_DRAW_VBO_INSTANCE_COUNT 5
+#define VIRGL_DRAW_VBO_INDEX_BIAS 6
+#define VIRGL_DRAW_VBO_START_INSTANCE 7
+#define VIRGL_DRAW_VBO_PRIMITIVE_RESTART 8
+#define VIRGL_DRAW_VBO_RESTART_INDEX 9
+#define VIRGL_DRAW_VBO_MIN_INDEX 10
+#define VIRGL_DRAW_VBO_MAX_INDEX 11
+#define VIRGL_DRAW_VBO_COUNT_FROM_SO 12
+/* tess packet */
+#define VIRGL_DRAW_VBO_VERTICES_PER_PATCH 13
+#define VIRGL_DRAW_VBO_DRAWID 14
+/* indirect packet */
+#define VIRGL_DRAW_VBO_INDIRECT_HANDLE 15
+#define VIRGL_DRAW_VBO_INDIRECT_OFFSET 16
+#define VIRGL_DRAW_VBO_INDIRECT_STRIDE 17
+#define VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT 18
+#define VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_OFFSET 19
+#define VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_HANDLE 20
+
+/* create surface */
+#define VIRGL_OBJ_SURFACE_SIZE 5
+#define VIRGL_OBJ_SURFACE_HANDLE 1
+#define VIRGL_OBJ_SURFACE_RES_HANDLE 2
+#define VIRGL_OBJ_SURFACE_FORMAT 3
+#define VIRGL_OBJ_SURFACE_BUFFER_FIRST_ELEMENT 4
+#define VIRGL_OBJ_SURFACE_BUFFER_LAST_ELEMENT 5
+#define VIRGL_OBJ_SURFACE_TEXTURE_LEVEL 4
+#define VIRGL_OBJ_SURFACE_TEXTURE_LAYERS 5
+
+/* create streamout target */
+#define VIRGL_OBJ_STREAMOUT_SIZE 4
+#define VIRGL_OBJ_STREAMOUT_HANDLE 1
+#define VIRGL_OBJ_STREAMOUT_RES_HANDLE 2
+#define VIRGL_OBJ_STREAMOUT_BUFFER_OFFSET 3
+#define VIRGL_OBJ_STREAMOUT_BUFFER_SIZE 4
+
+/* sampler state */
+#define VIRGL_OBJ_SAMPLER_STATE_SIZE 9
+#define VIRGL_OBJ_SAMPLER_STATE_HANDLE 1
+#define VIRGL_OBJ_SAMPLER_STATE_S0 2
+#define VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(x) (((x) & 0x7) << 0)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(x) (((x) & 0x7) << 3)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(x) (((x) & 0x7) << 6)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(x) (((x) & 0x3) << 9)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(x) (((x) & 0x3) << 11)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(x) (((x) & 0x3) << 13)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(x) (((x) & 0x1) << 15)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(x) (((x) & 0x7) << 16)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(x) (((x) & 0x1) << 19)
+
+#define VIRGL_OBJ_SAMPLER_STATE_LOD_BIAS 3
+#define VIRGL_OBJ_SAMPLER_STATE_MIN_LOD 4
+#define VIRGL_OBJ_SAMPLER_STATE_MAX_LOD 5
+#define VIRGL_OBJ_SAMPLER_STATE_BORDER_COLOR(x) ((x) + 6) /* 6 - 9 */
+
+
+/* sampler view */
+#define VIRGL_OBJ_SAMPLER_VIEW_SIZE 6
+#define VIRGL_OBJ_SAMPLER_VIEW_HANDLE 1
+#define VIRGL_OBJ_SAMPLER_VIEW_RES_HANDLE 2
+#define VIRGL_OBJ_SAMPLER_VIEW_FORMAT 3
+#define VIRGL_OBJ_SAMPLER_VIEW_BUFFER_FIRST_ELEMENT 4
+#define VIRGL_OBJ_SAMPLER_VIEW_BUFFER_LAST_ELEMENT 5
+#define VIRGL_OBJ_SAMPLER_VIEW_TEXTURE_LAYER 4
+#define VIRGL_OBJ_SAMPLER_VIEW_TEXTURE_LEVEL 5
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE 6
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(x) (((x) & 0x7) << 0)
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(x) (((x) & 0x7) << 3)
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(x) (((x) & 0x7) << 6)
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(x) (((x) & 0x7) << 9)
+
+/* set sampler views */
+#define VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views) ((num_views) + 2)
+#define VIRGL_SET_SAMPLER_VIEWS_SHADER_TYPE 1
+#define VIRGL_SET_SAMPLER_VIEWS_START_SLOT 2
+#define VIRGL_SET_SAMPLER_VIEWS_V0_HANDLE 3
+
+/* bind sampler states */
+#define VIRGL_BIND_SAMPLER_STATES(num_states) ((num_states) + 2)
+#define VIRGL_BIND_SAMPLER_STATES_SHADER_TYPE 1
+#define VIRGL_BIND_SAMPLER_STATES_START_SLOT 2
+#define VIRGL_BIND_SAMPLER_STATES_S0_HANDLE 3
+
+/* set stencil reference */
+#define VIRGL_SET_STENCIL_REF_SIZE 1
+#define VIRGL_SET_STENCIL_REF 1
+#define VIRGL_STENCIL_REF_VAL(f, s) ((f & 0xff) | (((s & 0xff) << 8)))
+
+/* set blend color */
+#define VIRGL_SET_BLEND_COLOR_SIZE 4
+#define VIRGL_SET_BLEND_COLOR(x) ((x) + 1)
+
+/* set scissor state */
+#define VIRGL_SET_SCISSOR_STATE_SIZE(x) (1 + 2 * x)
+#define VIRGL_SET_SCISSOR_START_SLOT 1
+#define VIRGL_SET_SCISSOR_MINX_MINY(x) (2 + (x * 2))
+#define VIRGL_SET_SCISSOR_MAXX_MAXY(x) (3 + (x * 2))
+
+/* resource copy region */
+#define VIRGL_CMD_RESOURCE_COPY_REGION_SIZE 13
+#define VIRGL_CMD_RCR_DST_RES_HANDLE 1
+#define VIRGL_CMD_RCR_DST_LEVEL 2
+#define VIRGL_CMD_RCR_DST_X 3
+#define VIRGL_CMD_RCR_DST_Y 4
+#define VIRGL_CMD_RCR_DST_Z 5
+#define VIRGL_CMD_RCR_SRC_RES_HANDLE 6
+#define VIRGL_CMD_RCR_SRC_LEVEL 7
+#define VIRGL_CMD_RCR_SRC_X 8
+#define VIRGL_CMD_RCR_SRC_Y 9
+#define VIRGL_CMD_RCR_SRC_Z 10
+#define VIRGL_CMD_RCR_SRC_W 11
+#define VIRGL_CMD_RCR_SRC_H 12
+#define VIRGL_CMD_RCR_SRC_D 13
+
+/* blit */
+#define VIRGL_CMD_BLIT_SIZE 21
+#define VIRGL_CMD_BLIT_S0 1
+#define VIRGL_CMD_BLIT_S0_MASK(x) (((x) & 0xff) << 0)
+#define VIRGL_CMD_BLIT_S0_FILTER(x) (((x) & 0x3) << 8)
+#define VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(x) (((x) & 0x1) << 10)
+#define VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(x) (((x) & 0x1) << 11)
+#define VIRGL_CMD_BLIT_S0_ALPHA_BLEND(x) (((x) & 0x1) << 12)
+#define VIRGL_CMD_BLIT_SCISSOR_MINX_MINY 2
+#define VIRGL_CMD_BLIT_SCISSOR_MAXX_MAXY 3
+#define VIRGL_CMD_BLIT_DST_RES_HANDLE 4
+#define VIRGL_CMD_BLIT_DST_LEVEL 5
+#define VIRGL_CMD_BLIT_DST_FORMAT 6
+#define VIRGL_CMD_BLIT_DST_X 7
+#define VIRGL_CMD_BLIT_DST_Y 8
+#define VIRGL_CMD_BLIT_DST_Z 9
+#define VIRGL_CMD_BLIT_DST_W 10
+#define VIRGL_CMD_BLIT_DST_H 11
+#define VIRGL_CMD_BLIT_DST_D 12
+#define VIRGL_CMD_BLIT_SRC_RES_HANDLE 13
+#define VIRGL_CMD_BLIT_SRC_LEVEL 14
+#define VIRGL_CMD_BLIT_SRC_FORMAT 15
+#define VIRGL_CMD_BLIT_SRC_X 16
+#define VIRGL_CMD_BLIT_SRC_Y 17
+#define VIRGL_CMD_BLIT_SRC_Z 18
+#define VIRGL_CMD_BLIT_SRC_W 19
+#define VIRGL_CMD_BLIT_SRC_H 20
+#define VIRGL_CMD_BLIT_SRC_D 21
+
+/* query object */
+#define VIRGL_OBJ_QUERY_SIZE 4
+#define VIRGL_OBJ_QUERY_HANDLE 1
+#define VIRGL_OBJ_QUERY_TYPE_INDEX 2
+#define VIRGL_OBJ_QUERY_TYPE(x) (x & 0xffff)
+#define VIRGL_OBJ_QUERY_INDEX(x) ((x & 0xffff) << 16)
+#define VIRGL_OBJ_QUERY_OFFSET 3
+#define VIRGL_OBJ_QUERY_RES_HANDLE 4
+
+#define VIRGL_QUERY_BEGIN_HANDLE 1
+
+#define VIRGL_QUERY_END_HANDLE 1
+
+#define VIRGL_QUERY_RESULT_SIZE 2
+#define VIRGL_QUERY_RESULT_HANDLE 1
+#define VIRGL_QUERY_RESULT_WAIT 2
+
+/* render condition */
+#define VIRGL_RENDER_CONDITION_SIZE 3
+#define VIRGL_RENDER_CONDITION_HANDLE 1
+#define VIRGL_RENDER_CONDITION_CONDITION 2
+#define VIRGL_RENDER_CONDITION_MODE 3
+
+/* resource inline write */
+#define VIRGL_RESOURCE_IW_RES_HANDLE 1
+#define VIRGL_RESOURCE_IW_LEVEL 2
+#define VIRGL_RESOURCE_IW_USAGE 3
+#define VIRGL_RESOURCE_IW_STRIDE 4
+#define VIRGL_RESOURCE_IW_LAYER_STRIDE 5
+#define VIRGL_RESOURCE_IW_X 6
+#define VIRGL_RESOURCE_IW_Y 7
+#define VIRGL_RESOURCE_IW_Z 8
+#define VIRGL_RESOURCE_IW_W 9
+#define VIRGL_RESOURCE_IW_H 10
+#define VIRGL_RESOURCE_IW_D 11
+#define VIRGL_RESOURCE_IW_DATA_START 12
+
+/* set streamout targets */
+#define VIRGL_SET_STREAMOUT_TARGETS_APPEND_BITMASK 1
+#define VIRGL_SET_STREAMOUT_TARGETS_H0 2
+
+/* set sample mask */
+#define VIRGL_SET_SAMPLE_MASK_SIZE 1
+#define VIRGL_SET_SAMPLE_MASK_MASK 1
+
+/* set clip state */
+#define VIRGL_SET_CLIP_STATE_SIZE 32
+#define VIRGL_SET_CLIP_STATE_C0 1
+
+/* polygon stipple */
+#define VIRGL_POLYGON_STIPPLE_SIZE 32
+#define VIRGL_POLYGON_STIPPLE_P0 1
+
+#define VIRGL_BIND_SHADER_SIZE 2
+#define VIRGL_BIND_SHADER_HANDLE 1
+#define VIRGL_BIND_SHADER_TYPE 2
+
+/* tess state */
+#define VIRGL_TESS_STATE_SIZE 6
+
+/* set min samples */
+#define VIRGL_SET_MIN_SAMPLES_SIZE 1
+#define VIRGL_SET_MIN_SAMPLES_MASK 1
+
+/* set shader buffers */
+#define VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE 3
+#define VIRGL_SET_SHADER_BUFFER_SIZE(x) ((VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE * (x)) + 2)
+#define VIRGL_SET_SHADER_BUFFER_SHADER_TYPE 1
+#define VIRGL_SET_SHADER_BUFFER_START_SLOT 2
+#define VIRGL_SET_SHADER_BUFFER_OFFSET(x) ((x) * VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE + 3)
+#define VIRGL_SET_SHADER_BUFFER_LENGTH(x) ((x) * VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE + 4)
+#define VIRGL_SET_SHADER_BUFFER_RES_HANDLE(x) ((x) * VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE + 5)
+
+/* set shader images */
+#define VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE 5
+#define VIRGL_SET_SHADER_IMAGE_SIZE(x) ((VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE * (x)) + 2)
+#define VIRGL_SET_SHADER_IMAGE_SHADER_TYPE 1
+#define VIRGL_SET_SHADER_IMAGE_START_SLOT 2
+#define VIRGL_SET_SHADER_IMAGE_FORMAT(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 3)
+#define VIRGL_SET_SHADER_IMAGE_ACCESS(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 4)
+#define VIRGL_SET_SHADER_IMAGE_LAYER_OFFSET(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 5)
+#define VIRGL_SET_SHADER_IMAGE_LEVEL_SIZE(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 6)
+#define VIRGL_SET_SHADER_IMAGE_RES_HANDLE(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 7)
+
+/* memory barrier */
+#define VIRGL_MEMORY_BARRIER_SIZE 1
+#define VIRGL_MEMORY_BARRIER_FLAGS 1
+
+/* launch grid */
+#define VIRGL_LAUNCH_GRID_SIZE 8
+#define VIRGL_LAUNCH_BLOCK_X 1
+#define VIRGL_LAUNCH_BLOCK_Y 2
+#define VIRGL_LAUNCH_BLOCK_Z 3
+#define VIRGL_LAUNCH_GRID_X 4
+#define VIRGL_LAUNCH_GRID_Y 5
+#define VIRGL_LAUNCH_GRID_Z 6
+#define VIRGL_LAUNCH_INDIRECT_HANDLE 7
+#define VIRGL_LAUNCH_INDIRECT_OFFSET 8
+
+/* framebuffer state no attachment */
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE 2
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH_HEIGHT 1
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH(x) (x & 0xffff)
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_HEIGHT(x) ((x >> 16) & 0xffff)
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS_SAMPLES 2
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS(x) (x & 0xffff)
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SAMPLES(x) ((x >> 16) & 0xff)
+
+/* texture barrier */
+#define VIRGL_TEXTURE_BARRIER_SIZE 1
+#define VIRGL_TEXTURE_BARRIER_FLAGS 1
+
+/* hw atomics */
+#define VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE 3
+#define VIRGL_SET_ATOMIC_BUFFER_SIZE(x) ((VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE * (x)) + 1)
+#define VIRGL_SET_ATOMIC_BUFFER_START_SLOT 1
+#define VIRGL_SET_ATOMIC_BUFFER_OFFSET(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 2)
+#define VIRGL_SET_ATOMIC_BUFFER_LENGTH(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 3)
+#define VIRGL_SET_ATOMIC_BUFFER_RES_HANDLE(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 4)
+
+/* qbo */
+#define VIRGL_QUERY_RESULT_QBO_SIZE 6
+#define VIRGL_QUERY_RESULT_QBO_HANDLE 1
+#define VIRGL_QUERY_RESULT_QBO_QBO_HANDLE 2
+#define VIRGL_QUERY_RESULT_QBO_WAIT 3
+#define VIRGL_QUERY_RESULT_QBO_RESULT_TYPE 4
+#define VIRGL_QUERY_RESULT_QBO_OFFSET 5
+#define VIRGL_QUERY_RESULT_QBO_INDEX 6
+
+#define VIRGL_TRANSFER_TO_HOST 1
+#define VIRGL_TRANSFER_FROM_HOST 2
+
+/* Transfer */
+#define VIRGL_TRANSFER3D_SIZE 13
+/* The first 11 dwords are the same as VIRGL_RESOURCE_IW_* */
+#define VIRGL_TRANSFER3D_DATA_OFFSET 12
+#define VIRGL_TRANSFER3D_DIRECTION 13
+
+/* Copy transfer */
+#define VIRGL_COPY_TRANSFER3D_SIZE 14
+/* The first 11 dwords are the same as VIRGL_RESOURCE_IW_* */
+#define VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE 12
+#define VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET 13
+#define VIRGL_COPY_TRANSFER3D_SYNCHRONIZED 14
+
+/* set tweak flags */
+#define VIRGL_SET_TWEAKS_SIZE 2
+#define VIRGL_SET_TWEAKS_ID 1
+#define VIRGL_SET_TWEAKS_VALUE 2
+
+/* virgl create */
+#define VIRGL_PIPE_RES_CREATE_SIZE 11
+#define VIRGL_PIPE_RES_CREATE_TARGET 1
+#define VIRGL_PIPE_RES_CREATE_FORMAT 2
+#define VIRGL_PIPE_RES_CREATE_BIND 3
+#define VIRGL_PIPE_RES_CREATE_WIDTH 4
+#define VIRGL_PIPE_RES_CREATE_HEIGHT 5
+#define VIRGL_PIPE_RES_CREATE_DEPTH 6
+#define VIRGL_PIPE_RES_CREATE_ARRAY_SIZE 7
+#define VIRGL_PIPE_RES_CREATE_LAST_LEVEL 8
+#define VIRGL_PIPE_RES_CREATE_NR_SAMPLES 9
+#define VIRGL_PIPE_RES_CREATE_FLAGS 10
+#define VIRGL_PIPE_RES_CREATE_BLOB_ID 11
+
+enum vrend_tweak_type {
+ virgl_tweak_gles_brga_emulate,
+ virgl_tweak_gles_brga_apply_dest_swizzle,
+ virgl_tweak_gles_tf3_samples_passes_multiplier,
+ virgl_tweak_undefined
+};
+
+/* Clear texture */
+#define VIRGL_CLEAR_TEXTURE_SIZE 12
+#define VIRGL_TEXTURE_HANDLE 1
+#define VIRGL_TEXTURE_LEVEL 2
+#define VIRGL_TEXTURE_SRC_X 3
+#define VIRGL_TEXTURE_SRC_Y 4
+#define VIRGL_TEXTURE_SRC_Z 5
+#define VIRGL_TEXTURE_SRC_W 6
+#define VIRGL_TEXTURE_SRC_H 7
+#define VIRGL_TEXTURE_SRC_D 8
+#define VIRGL_TEXTURE_ARRAY_A 9
+#define VIRGL_TEXTURE_ARRAY_B 10
+#define VIRGL_TEXTURE_ARRAY_C 11
+#define VIRGL_TEXTURE_ARRAY_D 12
+
+#endif
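
The *_SIZE/_OFFSET macros above describe virgl command payloads as dword slots: slot 0 is the command header, and the (x) parameter selects which element's field a slot holds. A minimal sketch of encoding a one-buffer SET_SHADER_BUFFERS command, assuming VIRGL_CMD0 and VIRGL_CCMD_SET_SHADER_BUFFERS from elsewhere in this header and treating shader_type, buf_size and res_handle as placeholders:

    /* One buffer: payload is 3 * 1 + 2 = 5 dwords, plus 1 header dword. */
    uint32_t cmd[1 + VIRGL_SET_SHADER_BUFFER_SIZE(1)];

    cmd[0] = VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0,
                        VIRGL_SET_SHADER_BUFFER_SIZE(1));
    cmd[VIRGL_SET_SHADER_BUFFER_SHADER_TYPE] = shader_type;
    cmd[VIRGL_SET_SHADER_BUFFER_START_SLOT] = 0;
    cmd[VIRGL_SET_SHADER_BUFFER_OFFSET(0)] = 0;        /* byte offset */
    cmd[VIRGL_SET_SHADER_BUFFER_LENGTH(0)] = buf_size; /* bytes bound */
    cmd[VIRGL_SET_SHADER_BUFFER_RES_HANDLE(0)] = res_handle;
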
diff --git a/chromium/third_party/minigbm/src/virtgpu_drm.h b/chromium/third_party/minigbm/src/external/virtgpu_drm.h
index a92d764d481..f3245201014 100644
--- a/chromium/third_party/minigbm/src/virtgpu_drm.h
+++ b/chromium/third_party/minigbm/src/external/virtgpu_drm.h
@@ -46,6 +46,7 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
@@ -71,6 +72,9 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
struct drm_virtgpu_getparam {
__u64 param;
@@ -101,9 +105,9 @@ struct drm_virtgpu_resource_info {
__u32 res_handle;
__u32 size;
union {
- __u32 stride;
+ __u32 blob_mem;
__u32 strides[4]; /* strides[0] is accessible with stride. */
- };
+ };
__u32 num_planes;
__u32 offsets[4];
__u64 format_modifier;
@@ -123,6 +127,8 @@ struct drm_virtgpu_3d_transfer_to_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
@@ -130,6 +136,8 @@ struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@@ -146,6 +154,31 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST 0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob_mem */
+ __u32 blob_mem;
+ __u32 blob_flags;
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u64 size;
+
+ /*
+ * for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+ * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
+ */
+ __u32 pad;
+ __u32 cmd_size;
+ __u64 cmd;
+ __u64 blob_id;
+};
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -181,6 +214,10 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
+ struct drm_virtgpu_resource_create_blob)
+
#if defined(__cplusplus)
}
#endif
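
The new getparam values and ioctl together define the blob-resource flow: probe VIRTGPU_PARAM_RESOURCE_BLOB (and VIRTGPU_PARAM_HOST_VISIBLE for mappable host memory), then allocate with DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB. A hedged sketch of a host-visible allocation, where fd is an open virtio-gpu DRM fd, the size and blob_id are placeholders, and error handling is elided:

    struct drm_virtgpu_resource_create_blob blob = { 0 };

    blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
    blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE | VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
    blob.size = 4096; /* bytes */
    blob.blob_id = 1; /* must match the id referenced in the 3D command stream */

    if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob) == 0) {
            /* blob.bo_handle is a GEM handle; blob.res_handle names the
             * host-side resource. */
    }
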
diff --git a/chromium/third_party/minigbm/src/exynos.c b/chromium/third_party/minigbm/src/exynos.c
index 6a801071fdf..5862643eff3 100644
--- a/chromium/third_party/minigbm/src/exynos.c
+++ b/chromium/third_party/minigbm/src/exynos.c
@@ -64,9 +64,8 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint
int ret;
for (plane = 0; plane < bo->meta.num_planes; plane++) {
size_t size = bo->meta.sizes[plane];
- struct drm_exynos_gem_create gem_create;
+ struct drm_exynos_gem_create gem_create = { 0 };
- memset(&gem_create, 0, sizeof(gem_create));
gem_create.size = size;
gem_create.flags = EXYNOS_BO_NONCONTIG;
@@ -84,8 +83,8 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint
cleanup_planes:
for (; plane != 0; plane--) {
- struct drm_gem_close gem_close;
- memset(&gem_close, 0, sizeof(gem_close));
+ struct drm_gem_close gem_close = { 0 };
+
gem_close.handle = bo->handles[plane - 1].u32;
int gem_close_ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
if (gem_close_ret) {
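
The memset-to-initializer conversion above recurs in nearly every file of this commit. In C, an initializer list with fewer elements than members zero-initializes the rest, so the two forms are interchangeable for these ioctl argument structs; a small illustration, not from the patch:

    struct drm_gem_close a;
    memset(&a, 0, sizeof(a));       /* old: explicit zeroing */

    struct drm_gem_close b = { 0 }; /* new: all members zero-initialized
                                     * (unlike memset, padding bytes are
                                     * not guaranteed to be zeroed) */
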
diff --git a/chromium/third_party/minigbm/src/gbm.c b/chromium/third_party/minigbm/src/gbm.c
index ab5b3f7049a..935349c3684 100644
--- a/chromium/third_party/minigbm/src/gbm.c
+++ b/chromium/third_party/minigbm/src/gbm.c
@@ -194,13 +194,12 @@ PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void
uint32_t usage)
{
struct gbm_bo *bo;
- struct drv_import_fd_data drv_data;
+ struct drv_import_fd_data drv_data = { 0 };
struct gbm_import_fd_data *fd_data = buffer;
struct gbm_import_fd_modifier_data *fd_modifier_data = buffer;
uint32_t gbm_format;
size_t num_planes, i, num_fds;
- memset(&drv_data, 0, sizeof(drv_data));
drv_data.use_flags = gbm_convert_usage(usage);
switch (type) {
case GBM_BO_IMPORT_FD:
@@ -263,6 +262,12 @@ PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void
return bo;
}
+PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+ uint32_t transfer_flags, uint32_t *stride, void **map_data)
+{
+ return gbm_bo_map2(bo, x, y, width, height, transfer_flags, stride, map_data, 0);
+}
+
PUBLIC void gbm_bo_unmap(struct gbm_bo *bo, void *map_data)
{
assert(bo);
@@ -393,12 +398,6 @@ PUBLIC int gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane)
return drv_bo_get_plane_fd(bo->bo, plane);
}
-PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t transfer_flags, uint32_t *stride, void **map_data, size_t plane)
-{
- return gbm_bo_map2(bo, x, y, width, height, transfer_flags, stride, map_data, plane);
-}
-
PUBLIC void *gbm_bo_map2(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
uint32_t transfer_flags, uint32_t *stride, void **map_data, int plane)
{
diff --git a/chromium/third_party/minigbm/src/gbm.h b/chromium/third_party/minigbm/src/gbm.h
index 24927288e18..3c491ccd9ec 100644
--- a/chromium/third_party/minigbm/src/gbm.h
+++ b/chromium/third_party/minigbm/src/gbm.h
@@ -373,6 +373,11 @@ enum gbm_bo_transfer_flags {
GBM_BO_TRANSFER_READ_WRITE = (GBM_BO_TRANSFER_READ | GBM_BO_TRANSFER_WRITE),
};
+void *
+gbm_bo_map(struct gbm_bo *bo,
+ uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+ uint32_t flags, uint32_t *stride, void **map_data);
+
void
gbm_bo_unmap(struct gbm_bo *bo, void *map_data);
@@ -471,10 +476,6 @@ int
gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane);
void *
-gbm_bo_map(struct gbm_bo *bo,
- uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t flags, uint32_t *stride, void **map_data, size_t plane);
-void *
gbm_bo_map2(struct gbm_bo *bo,
uint32_t x, uint32_t y, uint32_t width, uint32_t height,
uint32_t flags, uint32_t *stride, void **map_data, int plane);
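
The two gbm hunks above are an ABI cleanup: gbm_bo_map loses its trailing plane argument and now forwards to gbm_bo_map2 with plane 0, while gbm_bo_map2 takes the plane as int. A usage sketch against the new signature, assuming a valid bo and that minigbm's map path returns MAP_FAILED on error:

    uint32_t stride;
    void *map_data;
    void *addr = gbm_bo_map(bo, 0, 0, gbm_bo_get_width(bo), gbm_bo_get_height(bo),
                            GBM_BO_TRANSFER_READ_WRITE, &stride, &map_data);

    if (addr != MAP_FAILED) {
            /* read/write rows of stride bytes starting at addr */
            gbm_bo_unmap(bo, map_data);
    }
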
diff --git a/chromium/third_party/minigbm/src/helpers.c b/chromium/third_party/minigbm/src/helpers.c
index 22a61068027..7ed10ee5590 100644
--- a/chromium/third_party/minigbm/src/helpers.c
+++ b/chromium/third_party/minigbm/src/helpers.c
@@ -10,6 +10,8 @@
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
#include <xf86drm.h>
#include "drv_priv.h"
@@ -92,6 +94,9 @@ static const struct planar_layout *layout_from_format(uint32_t format)
case DRM_FORMAT_RGB332:
return &packed_1bpp_layout;
+ case DRM_FORMAT_R16:
+ return &packed_2bpp_layout;
+
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YVU420_ANDROID:
return &triplanar_yuv_420_layout;
@@ -184,7 +189,8 @@ size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_
if (!planes)
return 0;
- if (drv->backend->num_planes_from_modifier && modifier != DRM_FORMAT_MOD_INVALID)
+ if (drv->backend->num_planes_from_modifier && modifier != DRM_FORMAT_MOD_INVALID &&
+ modifier != DRM_FORMAT_MOD_LINEAR)
return drv->backend->num_planes_from_modifier(drv, format, modifier);
return planes;
@@ -307,23 +313,32 @@ int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32
int ret;
size_t plane;
uint32_t aligned_width, aligned_height;
- struct drm_mode_create_dumb create_dumb;
+ struct drm_mode_create_dumb create_dumb = { 0 };
aligned_width = width;
aligned_height = height;
switch (format) {
+ case DRM_FORMAT_R16:
+ /* HAL_PIXEL_FORMAT_Y16 requires that the buffer's width be 16 pixel
+ * aligned. See hardware/interfaces/graphics/common/1.0/types.hal. */
+ aligned_width = ALIGN(width, 16);
+ break;
case DRM_FORMAT_YVU420_ANDROID:
+ /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not
+ * be aligned. Update 'height' so that drv_bo_from_format below
+ * uses the non-aligned height. */
+ height = bo->meta.height;
+
/* Align width to 32 pixels, so chroma strides are 16 bytes as
* Android requires. */
aligned_width = ALIGN(width, 32);
- /* Adjust the height to include room for chroma planes.
- *
- * HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not
- * be aligned. */
- aligned_height = 3 * DIV_ROUND_UP(bo->meta.height, 2);
+
+ /* Adjust the height to include room for chroma planes. */
+ aligned_height = 3 * DIV_ROUND_UP(height, 2);
break;
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
/* Adjust the height to include room for chroma planes */
aligned_height = 3 * DIV_ROUND_UP(height, 2);
break;
@@ -331,7 +346,6 @@ int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32
break;
}
- memset(&create_dumb, 0, sizeof(create_dumb));
if (quirks & BO_QUIRK_DUMB32BPP) {
aligned_width =
DIV_ROUND_UP(aligned_width * layout_from_format(format)->bytes_per_pixel[0], 4);
@@ -366,12 +380,10 @@ int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t
int drv_dumb_bo_destroy(struct bo *bo)
{
- struct drm_mode_destroy_dumb destroy_dumb;
int ret;
+ struct drm_mode_destroy_dumb destroy_dumb = { 0 };
- memset(&destroy_dumb, 0, sizeof(destroy_dumb));
destroy_dumb.handle = bo->handles[0].u32;
-
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
if (ret) {
drv_log("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32);
@@ -437,6 +449,7 @@ int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
bo->handles[plane].u32 = prime_handle.handle;
}
+ bo->meta.tiling = data->tiling;
return 0;
}
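
The 3 * DIV_ROUND_UP(height, 2) adjustment in the hunk above makes the single dumb allocation tall enough to hold the 4:2:0 chroma planes below the luma plane: the two half-width, half-height chroma planes together occupy height/2 rows at the full luma stride. A worked example:

    uint32_t height = 480;                                 /* YV12 */
    uint32_t aligned_height = 3 * DIV_ROUND_UP(height, 2); /* = 720 */
    /* 480 luma rows + 2 chroma planes of 240 rows at stride/2
     * (i.e. 240 full-stride rows) = 720 rows in one buffer. */
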
diff --git a/chromium/third_party/minigbm/src/i915.c b/chromium/third_party/minigbm/src/i915.c
index 3ed785a0bd0..f7e2fd91856 100644
--- a/chromium/third_party/minigbm/src/i915.c
+++ b/chromium/third_party/minigbm/src/i915.c
@@ -8,7 +8,6 @@
#include <assert.h>
#include <errno.h>
-#include <i915_drm.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
@@ -17,6 +16,7 @@
#include <xf86drm.h>
#include "drv_priv.h"
+#include "external/i915_drm.h"
#include "helpers.h"
#include "util.h"
@@ -34,23 +34,60 @@ static const uint32_t render_formats[] = { DRM_FORMAT_ABGR16161616F };
static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010,
DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+static const uint64_t gen_modifier_order[] = { I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR };
+
+static const uint64_t gen11_modifier_order[] = { I915_FORMAT_MOD_Y_TILED_CCS,
+ I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR };
+
+struct modifier_support_t {
+ const uint64_t *order;
+ uint32_t count;
+};
+
struct i915_device {
uint32_t gen;
int32_t has_llc;
+ int32_t has_hw_protection;
+ struct modifier_support_t modifier;
};
static uint32_t i915_get_gen(int device_id)
{
const uint16_t gen3_ids[] = { 0x2582, 0x2592, 0x2772, 0x27A2, 0x27AE,
0x29C2, 0x29B2, 0x29D2, 0xA001, 0xA011 };
+ const uint16_t gen11_ids[] = { 0x4E71, 0x4E61, 0x4E51, 0x4E55, 0x4E57 };
+ const uint16_t gen12_ids[] = { 0x9A40, 0x9A49, 0x9A59, 0x9A60, 0x9A68, 0x9A70,
+ 0x9A78, 0x9AC0, 0x9AC9, 0x9AD9, 0x9AF8 };
unsigned i;
for (i = 0; i < ARRAY_SIZE(gen3_ids); i++)
if (gen3_ids[i] == device_id)
return 3;
+ /* Gen 11 */
+ for (i = 0; i < ARRAY_SIZE(gen11_ids); i++)
+ if (gen11_ids[i] == device_id)
+ return 11;
+
+ /* Gen 12 */
+ for (i = 0; i < ARRAY_SIZE(gen12_ids); i++)
+ if (gen12_ids[i] == device_id)
+ return 12;
return 4;
}
+static void i915_get_modifier_order(struct i915_device *i915)
+{
+ if (i915->gen == 11) {
+ i915->modifier.order = gen11_modifier_order;
+ i915->modifier.count = ARRAY_SIZE(gen11_modifier_order);
+ } else {
+ i915->modifier.order = gen_modifier_order;
+ i915->modifier.count = ARRAY_SIZE(gen_modifier_order);
+ }
+}
+
static uint64_t unset_flags(uint64_t current_flags, uint64_t mask)
{
uint64_t value = current_flags & ~mask;
@@ -60,13 +97,17 @@ static uint64_t unset_flags(uint64_t current_flags, uint64_t mask)
static int i915_add_combinations(struct driver *drv)
{
struct format_metadata metadata;
- uint64_t render, scanout_and_render, texture_only;
+ uint64_t render, scanout_and_render, texture_only, hw_protected;
+ struct i915_device *i915 = drv->priv;
scanout_and_render = BO_USE_RENDER_MASK | BO_USE_SCANOUT;
render = BO_USE_RENDER_MASK;
texture_only = BO_USE_TEXTURE_MASK;
- uint64_t linear_mask = BO_USE_RENDERSCRIPT | BO_USE_LINEAR | BO_USE_PROTECTED |
- BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN;
+ // HW protected buffers also need to be scanned out.
+ hw_protected = i915->has_hw_protection ? (BO_USE_PROTECTED | BO_USE_SCANOUT) : 0;
+
+ uint64_t linear_mask =
+ BO_USE_RENDERSCRIPT | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN;
metadata.tiling = I915_TILING_NONE;
metadata.priority = 1;
@@ -81,25 +122,24 @@ static int i915_add_combinations(struct driver *drv)
texture_only);
drv_modify_linear_combinations(drv);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
+
+ /* NV12 format for camera, display, decoding and encoding. */
/* IPU3 camera ISP supports only NV12 output. */
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER |
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER |
+ hw_protected);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
- * from camera.
+ * from camera and input/output from hardware decoder/encoder.
*/
drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
render = unset_flags(render, linear_mask);
scanout_and_render = unset_flags(scanout_and_render, linear_mask);
@@ -120,15 +160,17 @@ static int i915_add_combinations(struct driver *drv)
unset_flags(scanout_and_render, BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY);
/* Support y-tiled NV12 and P010 for libva */
#ifdef I915_SCANOUT_Y_TILED
- drv_add_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT);
+ uint64_t nv12_usage =
+ BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT | hw_protected;
+ uint64_t p010_usage = BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER | hw_protected;
#else
- drv_add_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER);
+ uint64_t nv12_usage = BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER;
+ uint64_t p010_usage = nv12_usage;
#endif
+ drv_add_combination(drv, DRM_FORMAT_NV12, &metadata, nv12_usage);
+ drv_add_combination(drv, DRM_FORMAT_P010, &metadata, p010_usage);
+
scanout_and_render = unset_flags(scanout_and_render, BO_USE_SCANOUT);
- drv_add_combination(drv, DRM_FORMAT_P010, &metadata,
- BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER);
drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
@@ -153,7 +195,15 @@ static int i915_align_dimensions(struct bo *bo, uint32_t tiling, uint32_t *strid
* horizontal alignment so that row start on a cache line (64
* bytes).
*/
+#ifdef LINEAR_ALIGN_256
+ /*
+ * If we want to import these buffers to amdgpu, they need to
+ * match the LINEAR_ALIGNED requirement of 256 byte alignment.
+ */
+ horizontal_alignment = 256;
+#else
horizontal_alignment = 64;
+#endif
vertical_alignment = 4;
break;
@@ -206,13 +256,12 @@ static int i915_init(struct driver *drv)
int ret;
int device_id;
struct i915_device *i915;
- drm_i915_getparam_t get_param;
+ drm_i915_getparam_t get_param = { 0 };
i915 = calloc(1, sizeof(*i915));
if (!i915)
return -ENOMEM;
- memset(&get_param, 0, sizeof(get_param));
get_param.param = I915_PARAM_CHIPSET_ID;
get_param.value = &device_id;
ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
@@ -223,6 +272,7 @@ static int i915_init(struct driver *drv)
}
i915->gen = i915_get_gen(device_id);
+ i915_get_modifier_order(i915);
memset(&get_param, 0, sizeof(get_param));
get_param.param = I915_PARAM_HAS_LLC;
@@ -234,8 +284,10 @@ static int i915_init(struct driver *drv)
return -EINVAL;
}
- drv->priv = i915;
+ if (i915->gen >= 12)
+ i915->has_hw_protection = 1;
+ drv->priv = i915;
return i915_add_combinations(drv);
}
@@ -272,17 +324,13 @@ static int i915_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, u
static int i915_bo_compute_metadata(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags, const uint64_t *modifiers, uint32_t count)
{
- static const uint64_t modifier_order[] = {
- I915_FORMAT_MOD_Y_TILED_CCS,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- };
uint64_t modifier;
+ struct i915_device *i915 = bo->drv->priv;
+ bool huge_bo = (i915->gen < 11) && (width > 4096);
if (modifiers) {
modifier =
- drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order));
+ drv_pick_modifier(modifiers, count, i915->modifier.order, i915->modifier.count);
} else {
struct combination *combo = drv_get_combination(bo->drv, format, use_flags);
if (!combo)
@@ -290,6 +338,39 @@ static int i915_bo_compute_metadata(struct bo *bo, uint32_t width, uint32_t heig
modifier = combo->metadata.modifier;
}
+ /*
+ * On Gen9/Gen10 GPUs, i915 only supports linear/X-tiled for buffers wider than 4096.
+ * VAAPI decodes NV12 in the Y-tiled format, so skip the modifier change for huge NV12/P010 bos.
+ */
+ if (huge_bo && format != DRM_FORMAT_NV12 && format != DRM_FORMAT_P010 &&
+ modifier != I915_FORMAT_MOD_X_TILED && modifier != DRM_FORMAT_MOD_LINEAR) {
+ uint32_t i;
+ for (i = 0; modifiers && i < count; i++) {
+ if (modifiers[i] == I915_FORMAT_MOD_X_TILED)
+ break;
+ }
+ if (i == count)
+ modifier = DRM_FORMAT_MOD_LINEAR;
+ else
+ modifier = I915_FORMAT_MOD_X_TILED;
+ }
+
+ /*
+ * Skip the I915_FORMAT_MOD_Y_TILED_CCS modifier if compression is disabled.
+ * Pick the Y-tiled modifier if it has been passed in; otherwise use linear.
+ */
+ if (!bo->drv->compression && modifier == I915_FORMAT_MOD_Y_TILED_CCS) {
+ uint32_t i;
+ for (i = 0; modifiers && i < count; i++) {
+ if (modifiers[i] == I915_FORMAT_MOD_Y_TILED)
+ break;
+ }
+ if (i == count)
+ modifier = DRM_FORMAT_MOD_LINEAR;
+ else
+ modifier = I915_FORMAT_MOD_Y_TILED;
+ }
+
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
bo->meta.tiling = I915_TILING_NONE;
@@ -367,30 +448,56 @@ static int i915_bo_create_from_metadata(struct bo *bo)
{
int ret;
size_t plane;
- struct drm_i915_gem_create gem_create;
- struct drm_i915_gem_set_tiling gem_set_tiling;
+ uint32_t gem_handle;
+ struct drm_i915_gem_set_tiling gem_set_tiling = { 0 };
+ struct i915_device *i915 = bo->drv->priv;
- memset(&gem_create, 0, sizeof(gem_create));
- gem_create.size = bo->meta.total_size;
+ if (i915->has_hw_protection && (bo->meta.use_flags & BO_USE_PROTECTED)) {
+ struct drm_i915_gem_object_param protected_param = {
+ .param = I915_OBJECT_PARAM | I915_PARAM_PROTECTED_CONTENT,
+ .data = 1,
+ };
- ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
- if (ret) {
- drv_log("DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n", gem_create.size);
- return -errno;
+ struct drm_i915_gem_create_ext_setparam setparam_protected = {
+ .base = { .name = I915_GEM_CREATE_EXT_SETPARAM },
+ .param = protected_param,
+ };
+
+ struct drm_i915_gem_create_ext create_ext = {
+ .size = bo->meta.total_size,
+ .extensions = (uintptr_t)&setparam_protected,
+ };
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ if (ret) {
+ drv_log("DRM_IOCTL_I915_GEM_CREATE_EXT failed (size=%llu)\n",
+ create_ext.size);
+ return -errno;
+ }
+
+ gem_handle = create_ext.handle;
+ } else {
+ struct drm_i915_gem_create gem_create = { 0 };
+ gem_create.size = bo->meta.total_size;
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
+ if (ret) {
+ drv_log("DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n", gem_create.size);
+ return -errno;
+ }
+
+ gem_handle = gem_create.handle;
}
for (plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = gem_create.handle;
+ bo->handles[plane].u32 = gem_handle;
- memset(&gem_set_tiling, 0, sizeof(gem_set_tiling));
gem_set_tiling.handle = bo->handles[0].u32;
gem_set_tiling.tiling_mode = bo->meta.tiling;
gem_set_tiling.stride = bo->meta.strides[0];
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_TILING, &gem_set_tiling);
if (ret) {
- struct drm_gem_close gem_close;
- memset(&gem_close, 0, sizeof(gem_close));
+ struct drm_gem_close gem_close = { 0 };
gem_close.handle = bo->handles[0].u32;
drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
@@ -410,14 +517,13 @@ static void i915_close(struct driver *drv)
static int i915_bo_import(struct bo *bo, struct drv_import_fd_data *data)
{
int ret;
- struct drm_i915_gem_get_tiling gem_get_tiling;
+ struct drm_i915_gem_get_tiling gem_get_tiling = { 0 };
ret = drv_prime_bo_import(bo, data);
if (ret)
return ret;
/* TODO(gsingh): export modifiers and get rid of backdoor tiling. */
- memset(&gem_get_tiling, 0, sizeof(gem_get_tiling));
gem_get_tiling.handle = bo->handles[0].u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_GET_TILING, &gem_get_tiling);
@@ -434,15 +540,13 @@ static int i915_bo_import(struct bo *bo, struct drv_import_fd_data *data)
static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
- void *addr;
+ void *addr = MAP_FAILED;
if (bo->meta.format_modifiers[0] == I915_FORMAT_MOD_Y_TILED_CCS)
return MAP_FAILED;
if (bo->meta.tiling == I915_TILING_NONE) {
- struct drm_i915_gem_mmap gem_map;
- memset(&gem_map, 0, sizeof(gem_map));
-
+ struct drm_i915_gem_mmap gem_map = { 0 };
/* TODO(b/118799155): We don't seem to have a good way to
* detect the use cases for which WC mapping is really needed.
* The current heuristic seems overly coarse and may be slowing
@@ -461,18 +565,21 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
gem_map.size = bo->meta.total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_map);
- if (ret) {
- drv_log("DRM_IOCTL_I915_GEM_MMAP failed\n");
- return MAP_FAILED;
- }
+ /* DRM_IOCTL_I915_GEM_MMAP mmaps the underlying shm
+ * file and returns a user space address directly, i.e. it
+ * doesn't go through mmap on the drm fd. If we try that on a
+ * dma-buf that doesn't have a shm file, i915.ko
+ * returns ENXIO. Fall through to
+ * DRM_IOCTL_I915_GEM_MMAP_GTT in that case, which
+ * will mmap on the drm fd instead. */
+ if (ret == 0)
+ addr = (void *)(uintptr_t)gem_map.addr_ptr;
+ }
- addr = (void *)(uintptr_t)gem_map.addr_ptr;
- } else {
- struct drm_i915_gem_mmap_gtt gem_map;
- memset(&gem_map, 0, sizeof(gem_map));
+ if (addr == MAP_FAILED) {
+ struct drm_i915_gem_mmap_gtt gem_map = { 0 };
gem_map.handle = bo->handles[0].u32;
-
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gem_map);
if (ret) {
drv_log("DRM_IOCTL_I915_GEM_MMAP_GTT failed\n");
@@ -495,9 +602,8 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
int ret;
- struct drm_i915_gem_set_domain set_domain;
+ struct drm_i915_gem_set_domain set_domain = { 0 };
- memset(&set_domain, 0, sizeof(set_domain));
set_domain.handle = bo->handles[0].u32;
if (bo->meta.tiling == I915_TILING_NONE) {
set_domain.read_domains = I915_GEM_DOMAIN_CPU;
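
With the preference tables now hanging off i915_device, modifier selection in i915_bo_compute_metadata becomes: pick the most-preferred table entry the caller advertised, then demote it if the bo is too wide or compression is off. A sketch of the first step, under the assumption that drv_pick_modifier walks the preference order and returns the first caller-supported entry:

    /* Hypothetical stand-in for drv_pick_modifier (helpers.c). */
    static uint64_t pick_modifier_sketch(const uint64_t *caller, uint32_t caller_count,
                                         const uint64_t *order, uint32_t order_count)
    {
            for (uint32_t i = 0; i < order_count; i++)  /* preference order */
                    for (uint32_t j = 0; j < caller_count; j++)
                            if (order[i] == caller[j])
                                    return order[i];
            return DRM_FORMAT_MOD_LINEAR; /* assumed fallback */
    }
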
diff --git a/chromium/third_party/minigbm/src/marvell.c b/chromium/third_party/minigbm/src/marvell.c
deleted file mode 100644
index c0b600b5c29..00000000000
--- a/chromium/third_party/minigbm/src/marvell.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2015 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifdef DRV_MARVELL
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
-
-static int marvell_init(struct driver *drv)
-{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
-
- return drv_add_linear_combinations(drv, render_target_formats,
- ARRAY_SIZE(render_target_formats));
-}
-
-const struct backend backend_marvell = {
- .name = "marvell",
- .init = marvell_init,
- .bo_create = drv_dumb_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
- .bo_unmap = drv_bo_munmap,
-};
-
-#endif
diff --git a/chromium/third_party/minigbm/src/mediatek.c b/chromium/third_party/minigbm/src/mediatek.c
index cdfc9ab73ae..a0b77e616a2 100644
--- a/chromium/third_party/minigbm/src/mediatek.c
+++ b/chromium/third_party/minigbm/src/mediatek.c
@@ -54,14 +54,7 @@ static int mediatek_init(struct driver *drv)
drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
&LINEAR_METADATA, BO_USE_TEXTURE_MASK);
- drv_add_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
- BO_USE_SW_MASK | BO_USE_LINEAR | BO_USE_PROTECTED);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
+ drv_add_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA, BO_USE_SW_MASK | BO_USE_LINEAR);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
@@ -72,20 +65,32 @@ static int mediatek_init(struct driver *drv)
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_DECODER);
drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &metadata, BO_USE_HW_VIDEO_DECODER);
+#if defined(MTK_MT8183) || defined(MTK_MT8192)
+ // TODO(hiroh): Switch to NV12 for the video decoder on MT8173 as well.
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_DECODER);
+#endif
+
+ /*
+ * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB as input/output for
+ * the hardware decoder/encoder.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER |
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+
+ /* NV12 format for encoding and display. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER | BO_USE_CAMERA_READ |
+ BO_USE_CAMERA_WRITE);
#ifdef MTK_MT8183
/* Only for MT8183 Camera subsystem */
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
drv_modify_combination(drv, DRM_FORMAT_YUYV, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
- drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
/* Private formats for private reprocessing in camera */
drv_add_combination(drv, DRM_FORMAT_MTISP_SXYZW10, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SW_MASK);
@@ -101,7 +106,7 @@ static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
int ret;
size_t plane;
uint32_t stride;
- struct drm_mtk_gem_create gem_create;
+ struct drm_mtk_gem_create gem_create = { 0 };
if (!drv_has_modifier(modifiers, count, DRM_FORMAT_MOD_LINEAR)) {
errno = EINVAL;
@@ -140,7 +145,6 @@ static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
drv_bo_from_format(bo, stride, height, format);
}
- memset(&gem_create, 0, sizeof(gem_create));
gem_create.size = bo->meta.total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_CREATE, &gem_create);
@@ -166,10 +170,9 @@ static int mediatek_bo_create(struct bo *bo, uint32_t width, uint32_t height, ui
static void *mediatek_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret, prime_fd;
- struct drm_mtk_gem_map_off gem_map;
+ struct drm_mtk_gem_map_off gem_map = { 0 };
struct mediatek_private_map_data *priv;
- memset(&gem_map, 0, sizeof(gem_map));
gem_map.handle = bo->handles[0].u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_MAP_OFFSET, &gem_map);
@@ -265,21 +268,23 @@ static uint32_t mediatek_resolve_format(struct driver *drv, uint32_t format, uin
* reprocessing and hence given the private format for MTK. */
if (use_flags & BO_USE_CAMERA_READ)
return DRM_FORMAT_MTISP_SXYZW10;
- /* For non-reprocessing uses, only MT8183 Camera subsystem
- * requires NV12. */
- else if (use_flags & BO_USE_CAMERA_WRITE)
- return DRM_FORMAT_NV12;
#endif
+ if (use_flags & BO_USE_CAMERA_WRITE)
+ return DRM_FORMAT_NV12;
+
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
#ifdef MTK_MT8183
- /* MT8183 camera and decoder subsystems require NV12. */
- if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
- BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER)) {
+ // TODO(hiroh): Switch to NV12 for the video decoder on MT8173 as well.
+ if (use_flags & (BO_USE_HW_VIDEO_DECODER)) {
return DRM_FORMAT_NV12;
}
#endif
+ if (use_flags &
+ (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_ENCODER)) {
+ return DRM_FORMAT_NV12;
+ }
return DRM_FORMAT_YVU420;
default:
return format;
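
The resolve_format changes collapse the per-SoC special cases: camera writes and encoder use of the Android flex formats now resolve to NV12 on all MediaTek SoCs, while decoder NV12 resolution stays behind an MT8183 guard. An illustration written as direct calls (in the driver this is reached through the backend's resolve_format hook):

    uint32_t fmt;

    fmt = mediatek_resolve_format(drv, DRM_FORMAT_FLEX_YCbCr_420_888,
                                  BO_USE_HW_VIDEO_ENCODER); /* NV12 on all SoCs */
    fmt = mediatek_resolve_format(drv, DRM_FORMAT_FLEX_YCbCr_420_888,
                                  BO_USE_SW_READ_OFTEN);    /* YVU420 fallback */
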
diff --git a/chromium/third_party/minigbm/src/meson.c b/chromium/third_party/minigbm/src/meson.c
deleted file mode 100644
index f82c57a685a..00000000000
--- a/chromium/third_party/minigbm/src/meson.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2018 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifdef DRV_MESON
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_BGR888, DRM_FORMAT_BGR565};
-
-static int meson_init(struct driver *drv)
-{
- drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
-
- return drv_modify_linear_combinations(drv);
-}
-
-const struct backend backend_meson = {
- .name = "meson",
- .init = meson_init,
- .bo_create = drv_dumb_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
- .bo_unmap = drv_bo_munmap,
-};
-
-#endif
diff --git a/chromium/third_party/minigbm/src/msm.c b/chromium/third_party/minigbm/src/msm.c
index fac1fd06d7d..acfc1efaf11 100644
--- a/chromium/third_party/minigbm/src/msm.c
+++ b/chromium/third_party/minigbm/src/msm.c
@@ -7,6 +7,7 @@
#ifdef DRV_MSM
#include <assert.h>
+#include <dlfcn.h>
#include <drm_fourcc.h>
#include <errno.h>
#include <inttypes.h>
@@ -65,6 +66,24 @@ static uint32_t get_ubwc_meta_size(uint32_t width, uint32_t height, uint32_t til
return ALIGN(macrotile_width * macrotile_height, PLANE_SIZE_ALIGN);
}
+static unsigned get_pitch_alignment(struct bo *bo)
+{
+ switch (bo->meta.format) {
+ case DRM_FORMAT_NV12:
+ return VENUS_STRIDE_ALIGN;
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU420_ANDROID:
+ /* TODO other YUV formats? */
+ /* Something (in the video stack?) assumes the U/V planes can use
+ * half the pitch as the Y plane.. to componsate, double the
+ * alignment:
+ */
+ return 2 * DEFAULT_ALIGNMENT;
+ default:
+ return DEFAULT_ALIGNMENT;
+ }
+}
+
static void msm_calculate_layout(struct bo *bo)
{
uint32_t width, height;
@@ -82,11 +101,14 @@ static void msm_calculate_layout(struct bo *bo)
y_stride = ALIGN(width, VENUS_STRIDE_ALIGN);
uv_stride = ALIGN(width, VENUS_STRIDE_ALIGN);
y_scanline = ALIGN(height, VENUS_SCANLINE_ALIGN * 2);
- uv_scanline = ALIGN(DIV_ROUND_UP(height, 2), VENUS_SCANLINE_ALIGN);
+ uv_scanline = ALIGN(DIV_ROUND_UP(height, 2),
+ VENUS_SCANLINE_ALIGN * (bo->meta.tiling ? 2 : 1));
y_plane = y_stride * y_scanline;
uv_plane = uv_stride * uv_scanline;
if (bo->meta.tiling == MSM_UBWC_TILING) {
+ y_plane = ALIGN(y_plane, PLANE_SIZE_ALIGN);
+ uv_plane = ALIGN(uv_plane, PLANE_SIZE_ALIGN);
y_plane += get_ubwc_meta_size(width, height, 32, 8);
uv_plane += get_ubwc_meta_size(width >> 1, height >> 1, 16, 8);
extra_padding = NV12_UBWC_PADDING(y_stride);
@@ -104,7 +126,7 @@ static void msm_calculate_layout(struct bo *bo)
} else {
uint32_t stride, alignw, alignh;
- alignw = ALIGN(width, DEFAULT_ALIGNMENT);
+ alignw = ALIGN(width, get_pitch_alignment(bo));
/* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned.
DRM_FORMAT_R8 of height one is used for JPEG camera output, so don't
height align that. */
@@ -157,13 +179,51 @@ static void msm_add_ubwc_combinations(struct driver *drv, const uint32_t *format
}
}
+/**
+ * Check for buggy apps that are known to not support modifiers, to avoid surprising them
+ * with a UBWC buffer.
+ */
+static bool should_avoid_ubwc(void)
+{
+#ifndef __ANDROID__
+ /* waffle is buggy: it requests a renderable buffer (which, on qcom platforms, we
+ * want to be UBWC) and then passes it to the kernel, discarding the modifier.
+ * So mesa ends up correctly rendering it as tiled+compressed, but the kernel tries
+ * to display it as linear. Other platforms do not see this issue, simply because
+ * they only use compressed (ex, AFBC) with the BO_USE_SCANOUT flag.
+ *
+ * See b/163137550
+ */
+ if (dlsym(RTLD_DEFAULT, "waffle_display_connect")) {
+ drv_log("WARNING: waffle detected, disabling UBWC\n");
+ return true;
+ }
+
+ /* The video_decode_accelerator_tests needs to read back the frames
+ * to verify they are correct. The frame verification relies on
+ * computing the MD5 of the video frame. UBWC results in a different
+ * MD5. This turns off UBWC for gtest until a proper frame
+ * comparison can be made.
+ * Rely on the same mechanism the waffle check uses, but this time check
+ * for a dynamic library function that is present in chrome but missing
+ * in gtest: CUPS is not loaded for video tests.
+ *
+ * See b/171260705
+ */
+ if (!dlsym(RTLD_DEFAULT, "cupsFilePrintf")) {
+ drv_log("WARNING: gtest detected, disabling UBWC\n");
+ return true;
+ }
+#endif
+ return false;
+}
+
static int msm_init(struct driver *drv)
{
struct format_metadata metadata;
uint64_t render_use_flags = BO_USE_RENDER_MASK | BO_USE_SCANOUT;
uint64_t texture_use_flags = BO_USE_TEXTURE_MASK | BO_USE_HW_VIDEO_DECODER;
- uint64_t sw_flags = (BO_USE_RENDERSCRIPT | BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_OFTEN |
- BO_USE_LINEAR | BO_USE_PROTECTED);
+ uint64_t sw_flags = (BO_USE_RENDERSCRIPT | BO_USE_SW_MASK | BO_USE_LINEAR);
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
&LINEAR_METADATA, render_use_flags);
@@ -171,28 +231,28 @@ static int msm_init(struct driver *drv)
drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
&LINEAR_METADATA, texture_use_flags);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
-
/* The camera stack standardizes on NV12 for YUV buffers. */
+ /* YVU420 and NV12 formats for camera, display and encoding. */
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_ENCODER);
+
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
- * from camera.
+ * from camera and input/output from hardware decoder/encoder.
*/
drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
drv_modify_linear_combinations(drv);
+ if (should_avoid_ubwc() || !drv->compression)
+ return 0;
+
metadata.tiling = MSM_UBWC_TILING;
metadata.priority = 2;
metadata.modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED;
@@ -206,21 +266,23 @@ static int msm_init(struct driver *drv)
msm_add_ubwc_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
&metadata, texture_use_flags);
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_ENCODER);
+
return 0;
}
static int msm_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height,
uint32_t format, const uint64_t modifier)
{
- struct drm_msm_gem_new req;
+ struct drm_msm_gem_new req = { 0 };
int ret;
size_t i;
bo->meta.tiling = (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) ? MSM_UBWC_TILING : 0;
-
msm_calculate_layout(bo);
- memset(&req, 0, sizeof(req));
req.flags = MSM_BO_WC | MSM_BO_SCANOUT;
req.size = bo->meta.total_size;
@@ -253,6 +315,9 @@ static int msm_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t
uint64_t modifier =
drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order));
+ if (!bo->drv->compression && modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED)
+ modifier = DRM_FORMAT_MOD_LINEAR;
+
return msm_bo_create_for_modifier(bo, width, height, format, modifier);
}
@@ -273,11 +338,9 @@ static int msm_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_
static void *msm_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
- struct drm_msm_gem_info req;
+ struct drm_msm_gem_info req = { 0 };
- memset(&req, 0, sizeof(req));
req.handle = bo->handles[0].u32;
-
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MSM_GEM_INFO, &req);
if (ret) {
drv_log("DRM_IOCLT_MSM_GEM_INFO failed with %s\n", strerror(errno));
@@ -292,6 +355,12 @@ static void *msm_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t m
static uint32_t msm_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
switch (format) {
+ case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+ /* Camera subsystem requires NV12. */
+ if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
+ return DRM_FORMAT_NV12;
+ /*HACK: See b/28671744 */
+ return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
return DRM_FORMAT_NV12;
default:
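
should_avoid_ubwc above fingerprints the running process by probing for exported symbols instead of checking names or environment variables. The core technique in isolation (RTLD_DEFAULT searches every object already loaded into the process; on glibc it may require _GNU_SOURCE):

    #define _GNU_SOURCE /* for RTLD_DEFAULT on glibc */
    #include <dlfcn.h>
    #include <stdbool.h>

    /* True if any object loaded into this process exports 'symbol'
     * with default visibility. */
    static bool process_has_symbol(const char *symbol)
    {
            return dlsym(RTLD_DEFAULT, symbol) != NULL;
    }
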
diff --git a/chromium/third_party/minigbm/src/nouveau.c b/chromium/third_party/minigbm/src/nouveau.c
deleted file mode 100644
index d0f25d4129d..00000000000
--- a/chromium/third_party/minigbm/src/nouveau.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2016 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
-
-static int nouveau_init(struct driver *drv)
-{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
-
- return drv_modify_linear_combinations(drv);
-}
-
-const struct backend backend_nouveau = {
- .name = "nouveau",
- .init = nouveau_init,
- .bo_create = drv_dumb_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
- .bo_unmap = drv_bo_munmap,
-};
diff --git a/chromium/third_party/minigbm/src/presubmit.sh b/chromium/third_party/minigbm/src/presubmit.sh
index 5e8a32a256f..69665f2b3e5 100755
--- a/chromium/third_party/minigbm/src/presubmit.sh
+++ b/chromium/third_party/minigbm/src/presubmit.sh
@@ -4,6 +4,7 @@
# found in the LICENSE file.
find \
'(' -name '*.[ch]' -or -name '*.cc' ')' \
- -not -name 'virtgpu_drm.h' \
+ -not -name 'virtgpu_drm.h' -not -name 'i915_drm.h' \
-not -name 'gbm.h' -not -name 'virgl_hw.h' \
+ -not -name 'virgl_protocol.h' \
-exec clang-format -style=file -i {} +
diff --git a/chromium/third_party/minigbm/src/radeon.c b/chromium/third_party/minigbm/src/radeon.c
deleted file mode 100644
index 68445c16372..00000000000
--- a/chromium/third_party/minigbm/src/radeon.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2017 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
-
-static int radeon_init(struct driver *drv)
-{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
-
- return drv_modify_linear_combinations(drv);
-}
-
-const struct backend backend_radeon = {
- .name = "radeon",
- .init = radeon_init,
- .bo_create = drv_dumb_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
- .bo_unmap = drv_bo_munmap,
-};
diff --git a/chromium/third_party/minigbm/src/rockchip.c b/chromium/third_party/minigbm/src/rockchip.c
index 25f16abdaa3..30d9fbe4453 100644
--- a/chromium/third_party/minigbm/src/rockchip.c
+++ b/chromium/third_party/minigbm/src/rockchip.c
@@ -88,24 +88,20 @@ static int rockchip_init(struct driver *drv)
drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
BO_USE_TEXTURE_MASK);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
+ /* NV12 format for camera, display, decoding and encoding. */
/* Camera ISP supports only NV12 output. */
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
- BO_USE_HW_VIDEO_ENCODER | BO_USE_SCANOUT);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
drv_modify_linear_combinations(drv);
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
- * from camera.
+ * from camera and input/output from hardware decoder/encoder.
*/
drv_add_combination(drv, DRM_FORMAT_R8, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SW_MASK |
- BO_USE_LINEAR | BO_USE_PROTECTED);
+ BO_USE_LINEAR | BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
return 0;
}
@@ -116,7 +112,7 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
{
int ret;
size_t plane;
- struct drm_rockchip_gem_create gem_create;
+ struct drm_rockchip_gem_create gem_create = { 0 };
if (format == DRM_FORMAT_NV12) {
uint32_t w_mbs = DIV_ROUND_UP(width, 16);
@@ -132,7 +128,8 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
*/
bo->meta.total_size += w_mbs * h_mbs * 128;
} else if (width <= 2560 &&
- drv_has_modifier(modifiers, count, DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC)) {
+ drv_has_modifier(modifiers, count, DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC) &&
+ bo->drv->compression) {
/* If the caller has decided they can use AFBC, always
* pick that */
afbc_bo_from_format(bo, width, height, format);
@@ -159,9 +156,7 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
drv_bo_from_format(bo, stride, height, format);
}
- memset(&gem_create, 0, sizeof(gem_create));
gem_create.size = bo->meta.total_size;
-
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &gem_create);
if (ret) {
@@ -187,17 +182,15 @@ static int rockchip_bo_create(struct bo *bo, uint32_t width, uint32_t height, ui
static void *rockchip_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
- struct drm_rockchip_gem_map_off gem_map;
struct rockchip_private_map_data *priv;
+ struct drm_rockchip_gem_map_off gem_map = { 0 };
/* We can only map buffers created with SW access flags, which should
* have no modifiers (ie, not AFBC). */
if (bo->meta.format_modifiers[0] == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC)
return MAP_FAILED;
- memset(&gem_map, 0, sizeof(gem_map));
gem_map.handle = bo->handles[0].u32;
-
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &gem_map);
if (ret) {
drv_log("DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET failed\n");
diff --git a/chromium/third_party/minigbm/src/synaptics.c b/chromium/third_party/minigbm/src/synaptics.c
deleted file mode 100644
index bcd8189cfea..00000000000
--- a/chromium/third_party/minigbm/src/synaptics.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2020 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-#ifdef DRV_SYNAPTICS
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888 };
-
-static int synaptics_init(struct driver *drv)
-{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
-
- return drv_modify_linear_combinations(drv);
-}
-
-const struct backend backend_synaptics = {
- .name = "synaptics",
- .init = synaptics_init,
- .bo_create = drv_dumb_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
- .bo_unmap = drv_bo_munmap,
-};
-
-#endif
diff --git a/chromium/third_party/minigbm/src/tegra.c b/chromium/third_party/minigbm/src/tegra.c
index df97461c379..c22a9a9302a 100644
--- a/chromium/third_party/minigbm/src/tegra.c
+++ b/chromium/third_party/minigbm/src/tegra.c
@@ -213,7 +213,7 @@ static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint3
{
uint32_t size, stride, block_height_log2 = 0;
enum nv_mem_kind kind = NV_MEM_KIND_PITCH;
- struct drm_tegra_gem_create gem_create;
+ struct drm_tegra_gem_create gem_create = { 0 };
int ret;
if (use_flags &
@@ -223,7 +223,6 @@ static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint3
compute_layout_blocklinear(width, height, format, &kind, &block_height_log2,
&stride, &size);
- memset(&gem_create, 0, sizeof(gem_create));
gem_create.size = size;
gem_create.flags = 0;
@@ -239,9 +238,8 @@ static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint3
bo->meta.strides[0] = stride;
if (kind != NV_MEM_KIND_PITCH) {
- struct drm_tegra_gem_set_tiling gem_tile;
+ struct drm_tegra_gem_set_tiling gem_tile = { 0 };
- memset(&gem_tile, 0, sizeof(gem_tile));
gem_tile.handle = bo->handles[0].u32;
gem_tile.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
gem_tile.value = block_height_log2;
@@ -264,16 +262,14 @@ static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint3
static int tegra_bo_import(struct bo *bo, struct drv_import_fd_data *data)
{
int ret;
- struct drm_tegra_gem_get_tiling gem_get_tiling;
+ struct drm_tegra_gem_get_tiling gem_get_tiling = { 0 };
ret = drv_prime_bo_import(bo, data);
if (ret)
return ret;
/* TODO(gsingh): export modifiers and get rid of backdoor tiling. */
- memset(&gem_get_tiling, 0, sizeof(gem_get_tiling));
gem_get_tiling.handle = bo->handles[0].u32;
-
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_GET_TILING, &gem_get_tiling);
if (ret) {
drv_gem_bo_destroy(bo);
@@ -299,12 +295,10 @@ static int tegra_bo_import(struct bo *bo, struct drv_import_fd_data *data)
static void *tegra_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
- struct drm_tegra_gem_mmap gem_map;
+ struct drm_tegra_gem_mmap gem_map = { 0 };
struct tegra_private_map_data *priv;
- memset(&gem_map, 0, sizeof(gem_map));
gem_map.handle = bo->handles[0].u32;
-
ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_MMAP, &gem_map, sizeof(gem_map));
if (ret < 0) {
drv_log("DRM_TEGRA_GEM_MMAP failed\n");
diff --git a/chromium/third_party/minigbm/src/udl.c b/chromium/third_party/minigbm/src/udl.c
deleted file mode 100644
index 12dc9677a4f..00000000000
--- a/chromium/third_party/minigbm/src/udl.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2014 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
-
-static int udl_init(struct driver *drv)
-{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
-
- return drv_modify_linear_combinations(drv);
-}
-
-const struct backend backend_udl = {
- .name = "udl",
- .init = udl_init,
- .bo_create = drv_dumb_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
- .bo_unmap = drv_bo_munmap,
-};
diff --git a/chromium/third_party/minigbm/src/vc4.c b/chromium/third_party/minigbm/src/vc4.c
index 06b3ed77d9b..5ea4bc3e2fc 100644
--- a/chromium/third_party/minigbm/src/vc4.c
+++ b/chromium/third_party/minigbm/src/vc4.c
@@ -20,11 +20,23 @@
static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888 };
+static const uint32_t texture_only_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_YVU420 };
+
static int vc4_init(struct driver *drv)
{
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
&LINEAR_METADATA, BO_USE_RENDER_MASK);
+ drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats),
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+ /*
+ * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
+ * Video Encoder Accelerator (VEA). It could potentially also support NV12 in the future.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
+ BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER);
+
return drv_modify_linear_combinations(drv);
}
@@ -34,7 +46,7 @@ static int vc4_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t he
int ret;
size_t plane;
uint32_t stride;
- struct drm_vc4_create_bo bo_create;
+ struct drm_vc4_create_bo bo_create = { 0 };
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -54,12 +66,11 @@ static int vc4_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t he
stride = ALIGN(stride, 64);
drv_bo_from_format(bo, stride, height, format);
- memset(&bo_create, 0, sizeof(bo_create));
bo_create.size = bo->meta.total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VC4_CREATE_BO, &bo_create);
if (ret) {
- drv_log("DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n", bo->meta.total_size);
+ drv_log("DRM_IOCTL_VC4_CREATE_BO failed (size=%zu)\n", bo->meta.total_size);
return -errno;
}
@@ -97,11 +108,9 @@ static int vc4_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t
static void *vc4_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
- struct drm_vc4_mmap_bo bo_map;
+ struct drm_vc4_mmap_bo bo_map = { 0 };
- memset(&bo_map, 0, sizeof(bo_map));
bo_map.handle = bo->handles[0].u32;
-
ret = drmCommandWriteRead(bo->drv->fd, DRM_VC4_MMAP_BO, &bo_map, sizeof(bo_map));
if (ret) {
drv_log("DRM_VC4_MMAP_BO failed\n");
diff --git a/chromium/third_party/minigbm/src/vgem.c b/chromium/third_party/minigbm/src/vgem.c
deleted file mode 100644
index 0d0371c24a0..00000000000
--- a/chromium/third_party/minigbm/src/vgem.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright 2016 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-#define MESA_LLVMPIPE_TILE_ORDER 6
-#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB8888 };
-
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
- DRM_FORMAT_YVU420_ANDROID };
-
-static int vgem_init(struct driver *drv)
-{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
-
- drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
-
- return drv_modify_linear_combinations(drv);
-}
-
-static int vgem_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t flags)
-{
- width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
- height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
-
- return drv_dumb_bo_create(bo, width, height, format, flags);
-}
-
-static uint32_t vgem_resolve_format(struct driver *drv, uint32_t format, uint64_t flags)
-{
- switch (format) {
- case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
- /*HACK: See b/28671744 */
- return DRM_FORMAT_XBGR8888;
- case DRM_FORMAT_FLEX_YCbCr_420_888:
- return DRM_FORMAT_YVU420;
- default:
- return format;
- }
-}
-
-const struct backend backend_vgem = {
- .name = "vgem",
- .init = vgem_init,
- .bo_create = vgem_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
- .bo_unmap = drv_bo_munmap,
- .resolve_format = vgem_resolve_format,
-};
diff --git a/chromium/third_party/minigbm/src/virtio_gpu.c b/chromium/third_party/minigbm/src/virtio_gpu.c
index eb6c97d6f8e..d43c4e477b6 100644
--- a/chromium/third_party/minigbm/src/virtio_gpu.c
+++ b/chromium/third_party/minigbm/src/virtio_gpu.c
@@ -4,7 +4,9 @@
* found in the LICENSE file.
*/
+#include <assert.h>
#include <errno.h>
+#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
@@ -12,10 +14,11 @@
#include <xf86drm.h>
#include "drv_priv.h"
+#include "external/virgl_hw.h"
+#include "external/virgl_protocol.h"
+#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"
-#include "virgl_hw.h"
-#include "virtgpu_drm.h"
#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
@@ -25,26 +28,59 @@
#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
+struct feature {
+ uint64_t feature;
+ const char *name;
+ uint32_t enabled;
+};
+
+enum feature_id {
+ feat_3d,
+ feat_capset_fix,
+ feat_resource_blob,
+ feat_host_visible,
+ feat_host_cross_device,
+ feat_max,
+};
+
+#define FEATURE(x) \
+ (struct feature) \
+ { \
+ x, #x, 0 \
+ }
+
+static struct feature features[] = {
+ FEATURE(VIRTGPU_PARAM_3D_FEATURES), FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
+ FEATURE(VIRTGPU_PARAM_RESOURCE_BLOB), FEATURE(VIRTGPU_PARAM_HOST_VISIBLE),
+ FEATURE(VIRTGPU_PARAM_CROSS_DEVICE),
+};
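/*
 * Editor's note (not part of this patch): the FEATURE() macro above is a
 * compound literal that stringizes its argument, so each table entry records
 * the param id, its name for logging, and an enabled flag starting at 0.
 * For example:
 *
 *	FEATURE(VIRTGPU_PARAM_3D_FEATURES)
 *
 * expands to
 *
 *	(struct feature){ VIRTGPU_PARAM_3D_FEATURES,
 *			  "VIRTGPU_PARAM_3D_FEATURES", 0 }
 */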
+
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888 };
-static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
- DRM_FORMAT_NV12,
- DRM_FORMAT_YVU420_ANDROID };
+static const uint32_t dumb_texture_source_formats[] = {
+ DRM_FORMAT_R8, DRM_FORMAT_R16, DRM_FORMAT_YVU420,
+ DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_YVU420_ANDROID
+};
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, DRM_FORMAT_RG88,
- DRM_FORMAT_YVU420_ANDROID };
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21,
+ DRM_FORMAT_R8, DRM_FORMAT_R16,
+ DRM_FORMAT_RG88, DRM_FORMAT_YVU420_ANDROID };
struct virtio_gpu_priv {
- int has_3d;
int caps_is_v2;
union virgl_caps caps;
+ int host_gbm_enabled;
+ atomic_int next_blob_id;
};
static uint32_t translate_format(uint32_t drm_fourcc)
{
switch (drm_fourcc) {
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_RGB888:
+ return VIRGL_FORMAT_R8G8B8_UNORM;
case DRM_FORMAT_XRGB8888:
return VIRGL_FORMAT_B8G8R8X8_UNORM;
case DRM_FORMAT_ARGB8888:
@@ -53,6 +89,8 @@ static uint32_t translate_format(uint32_t drm_fourcc)
return VIRGL_FORMAT_R8G8B8X8_UNORM;
case DRM_FORMAT_ABGR8888:
return VIRGL_FORMAT_R8G8B8A8_UNORM;
+ case DRM_FORMAT_ABGR16161616F:
+ return VIRGL_FORMAT_R16G16B16A16_FLOAT;
case DRM_FORMAT_RGB565:
return VIRGL_FORMAT_B5G6R5_UNORM;
case DRM_FORMAT_R8:
@@ -61,6 +99,8 @@ static uint32_t translate_format(uint32_t drm_fourcc)
return VIRGL_FORMAT_R8G8_UNORM;
case DRM_FORMAT_NV12:
return VIRGL_FORMAT_NV12;
+ case DRM_FORMAT_NV21:
+ return VIRGL_FORMAT_NV21;
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YVU420_ANDROID:
return VIRGL_FORMAT_YV12;
@@ -69,8 +109,8 @@ static uint32_t translate_format(uint32_t drm_fourcc)
}
}
-static bool virtio_gpu_supports_format(struct virgl_supported_format_mask *supported,
- uint32_t drm_format)
+static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mask *supported,
+ uint32_t drm_format)
{
uint32_t virgl_format = translate_format(drm_format);
if (!virgl_format) {
@@ -82,6 +122,243 @@ static bool virtio_gpu_supports_format(struct virgl_supported_format_mask *suppo
return supported->bitmask[bitmask_index] & (1 << bit_index);
}
+// The metadata generated here for emulated buffers is slightly different from the metadata
+// generated by drv_bo_from_format. In order to simplify transfers in the flush and invalidate
+// functions below, the emulated buffers are oversized. For example, ignoring stride alignment
+// requirements for the sake of illustration, a 6x6 YUV420 image buffer might have the
+// following layout from drv_bo_from_format:
+//
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | U | U | U | U | U | U |
+// | U | U | U | V | V | V |
+// | V | V | V | V | V | V |
+//
+// where each plane immediately follows the previous plane in memory. This layout makes it
+// difficult to compute the transfers needed for example when the middle 2x2 region of the
+// image is locked and needs to be flushed/invalidated.
+//
+// Emulated multi-plane buffers instead have a layout of:
+//
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | U | U | U | | | |
+// | U | U | U | | | |
+// | U | U | U | | | |
+// | V | V | V | | | |
+// | V | V | V | | | |
+// | V | V | V | | | |
+//
+// where each plane is placed as a sub-image (albeit with a very large stride) in order to
+// simplify transfers into 3 sub-image transfers for the above example.
+//
+// Additional note: the V-plane is not placed to the right of the U-plane due to some
+// observed failures in media framework code which assumes the V-plane is not
+// "row-interlaced" with the U-plane.
+static void virtio_gpu_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
+{
+ uint32_t y_plane_height;
+ uint32_t c_plane_height;
+ uint32_t original_width = bo->meta.width;
+ uint32_t original_height = bo->meta.height;
+
+ metadata->format = DRM_FORMAT_R8;
+ switch (bo->meta.format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ // Bi-planar
+ metadata->num_planes = 2;
+
+ y_plane_height = original_height;
+ c_plane_height = DIV_ROUND_UP(original_height, 2);
+
+ metadata->width = original_width;
+ metadata->height = y_plane_height + c_plane_height;
+
+ // Y-plane (full resolution)
+ metadata->strides[0] = metadata->width;
+ metadata->offsets[0] = 0;
+ metadata->sizes[0] = metadata->width * y_plane_height;
+
+ // CbCr-plane (half resolution, interleaved, placed below Y-plane)
+ metadata->strides[1] = metadata->width;
+ metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
+ metadata->sizes[1] = metadata->width * c_plane_height;
+
+ metadata->total_size = metadata->width * metadata->height;
+ break;
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU420_ANDROID:
+ // Tri-planar
+ metadata->num_planes = 3;
+
+ y_plane_height = original_height;
+ c_plane_height = DIV_ROUND_UP(original_height, 2);
+
+ metadata->width = ALIGN(original_width, 32);
+ metadata->height = y_plane_height + (2 * c_plane_height);
+
+ // Y-plane (full resolution)
+ metadata->strides[0] = metadata->width;
+ metadata->offsets[0] = 0;
+ metadata->sizes[0] = metadata->width * original_height;
+
+ // Cb-plane (half resolution, placed below Y-plane)
+ metadata->strides[1] = metadata->width;
+ metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
+ metadata->sizes[1] = metadata->width * c_plane_height;
+
+ // Cr-plane (half resolution, placed below Cb-plane)
+ metadata->strides[2] = metadata->width;
+ metadata->offsets[2] = metadata->offsets[1] + metadata->sizes[1];
+ metadata->sizes[2] = metadata->width * c_plane_height;
+
+ metadata->total_size = metadata->width * metadata->height;
+ break;
+ default:
+ break;
+ }
+}
+
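/*
 * Editor's sketch (not part of this patch): a worked example of the NV12
 * branch of virtio_gpu_get_emulated_metadata() for an assumed 6x6 buffer,
 * ignoring stride alignment. The two NV12 planes collapse into a single
 * R8 image of 6x9.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))

int main(void)
{
	uint32_t width = 6, height = 6;
	uint32_t y_plane_height = height;
	uint32_t c_plane_height = DIV_ROUND_UP(height, 2);
	uint32_t emulated_height = y_plane_height + c_plane_height;

	/* plane 0 (Y):    stride 6, offset 0,  size 36
	 * plane 1 (CbCr): stride 6, offset 36, size 18 */
	printf("emulated R8 image: %ux%u, total %u bytes\n", width,
	       emulated_height, width * emulated_height);
	return 0;
}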
+struct virtio_transfers_params {
+ size_t xfers_needed;
+ struct rectangle xfer_boxes[DRV_MAX_PLANES];
+};
+
+static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
+ const struct rectangle *transfer_box,
+ struct virtio_transfers_params *xfer_params)
+{
+ uint32_t y_plane_height;
+ uint32_t c_plane_height;
+ struct bo_metadata emulated_metadata;
+
+ if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width &&
+ transfer_box->height == bo->meta.height) {
+ virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);
+
+ xfer_params->xfers_needed = 1;
+ xfer_params->xfer_boxes[0].x = 0;
+ xfer_params->xfer_boxes[0].y = 0;
+ xfer_params->xfer_boxes[0].width = emulated_metadata.width;
+ xfer_params->xfer_boxes[0].height = emulated_metadata.height;
+
+ return;
+ }
+
+ switch (bo->meta.format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ // Bi-planar
+ xfer_params->xfers_needed = 2;
+
+ y_plane_height = bo->meta.height;
+ c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);
+
+ // Y-plane (full resolution)
+ xfer_params->xfer_boxes[0].x = transfer_box->x;
+ xfer_params->xfer_boxes[0].y = transfer_box->y;
+ xfer_params->xfer_boxes[0].width = transfer_box->width;
+ xfer_params->xfer_boxes[0].height = transfer_box->height;
+
+ // CbCr-plane (half resolution, interleaved, placed below Y-plane)
+ xfer_params->xfer_boxes[1].x = transfer_box->x;
+ xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
+ xfer_params->xfer_boxes[1].width = transfer_box->width;
+ xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);
+
+ break;
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU420_ANDROID:
+ // Tri-planar
+ xfer_params->xfers_needed = 3;
+
+ y_plane_height = bo->meta.height;
+ c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);
+
+ // Y-plane (full resolution)
+ xfer_params->xfer_boxes[0].x = transfer_box->x;
+ xfer_params->xfer_boxes[0].y = transfer_box->y;
+ xfer_params->xfer_boxes[0].width = transfer_box->width;
+ xfer_params->xfer_boxes[0].height = transfer_box->height;
+
+ // Cb-plane (half resolution, placed below Y-plane)
+ xfer_params->xfer_boxes[1].x = transfer_box->x;
+ xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
+ xfer_params->xfer_boxes[1].width = DIV_ROUND_UP(transfer_box->width, 2);
+ xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);
+
+ // Cr-plane (half resolution, placed below Cb-plane)
+ xfer_params->xfer_boxes[2].x = transfer_box->x;
+ xfer_params->xfer_boxes[2].y = transfer_box->y + y_plane_height + c_plane_height;
+ xfer_params->xfer_boxes[2].width = DIV_ROUND_UP(transfer_box->width, 2);
+ xfer_params->xfer_boxes[2].height = DIV_ROUND_UP(transfer_box->height, 2);
+
+ break;
+ }
+}
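/*
 * Editor's sketch (not part of this patch): the transfer boxes the function
 * above would produce for an assumed 6x6 NV12 buffer locked at rect
 * (x=2, y=2, w=2, h=2). The Y box is the rect itself; the CbCr box shifts
 * below the 6-row Y plane with its height halved, rounding up.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))

struct box {
	uint32_t x, y, width, height;
};

int main(void)
{
	uint32_t y_plane_height = 6;
	struct box rect = { 2, 2, 2, 2 };
	struct box y_box = rect;
	struct box c_box = { rect.x, rect.y + y_plane_height, rect.width,
			     DIV_ROUND_UP(rect.height, 2) };

	printf("Y: (%u,%u) %ux%u, CbCr: (%u,%u) %ux%u\n", y_box.x, y_box.y,
	       y_box.width, y_box.height, c_box.x, c_box.y, c_box.width,
	       c_box.height); /* Y: (2,2) 2x2, CbCr: (2,8) 2x1 */
	return 0;
}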
+
+static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_t drm_format,
+ uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ if (priv->caps.max_version == 0) {
+ return true;
+ }
+
+ if ((use_flags & BO_USE_RENDERING) &&
+ !virtio_gpu_bitmask_supports_format(&priv->caps.v1.render, drm_format)) {
+ return false;
+ }
+
+ if ((use_flags & BO_USE_TEXTURE) &&
+ !virtio_gpu_bitmask_supports_format(&priv->caps.v1.sampler, drm_format)) {
+ return false;
+ }
+
+ if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
+ !virtio_gpu_bitmask_supports_format(&priv->caps.v2.scanout, drm_format)) {
+ return false;
+ }
+
+ return true;
+}
+
+// For virtio backends that do not support a format natively (e.g. multi-planar formats are
+// not supported in virglrenderer when gbm is unavailable on the host machine), this reports
+// whether the format and usage combination can be handled as a blob (byte buffer).
+static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv,
+ uint32_t drm_format,
+ uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ // Only enable emulation on non-gbm virtio backends.
+ if (priv->host_gbm_enabled) {
+ return false;
+ }
+
+ if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT)) {
+ return false;
+ }
+
+ if (!virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags)) {
+ return false;
+ }
+
+ return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 ||
+ drm_format == DRM_FORMAT_YVU420 || drm_format == DRM_FORMAT_YVU420_ANDROID;
+}
+
// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
@@ -89,23 +366,19 @@ static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
{
struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
- if (priv->has_3d && priv->caps.max_version >= 1) {
- if ((use_flags & BO_USE_RENDERING) &&
- !virtio_gpu_supports_format(&priv->caps.v1.render, drm_format)) {
- drv_log("Skipping unsupported render format: %d\n", drm_format);
- return;
+ if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
+ if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
+ !virtio_gpu_supports_combination_natively(drv, drm_format, use_flags)) {
+			drv_log("Unsupported scanout format: %d\n", drm_format);
+ use_flags &= ~BO_USE_SCANOUT;
}
- if ((use_flags & BO_USE_TEXTURE) &&
- !virtio_gpu_supports_format(&priv->caps.v1.sampler, drm_format)) {
- drv_log("Skipping unsupported texture format: %d\n", drm_format);
+ if (!virtio_gpu_supports_combination_natively(drv, drm_format, use_flags) &&
+ !virtio_gpu_supports_combination_through_emulation(drv, drm_format,
+ use_flags)) {
+			drv_log("Skipping unsupported combination format: %d\n", drm_format);
return;
}
- if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
- !virtio_gpu_supports_format(&priv->caps.v2.scanout, drm_format)) {
- drv_log("Unsupported scanout format: %d\n", drm_format);
- use_flags &= ~BO_USE_SCANOUT;
- }
}
drv_add_combination(drv, drm_format, metadata, use_flags);
@@ -155,15 +428,33 @@ static uint32_t use_flags_to_bind(uint64_t use_flags)
handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);
+ if (use_flags & BO_USE_PROTECTED) {
+ handle_flag(&use_flags, BO_USE_PROTECTED, &bind, VIRGL_BIND_MINIGBM_PROTECTED);
+ } else {
+ // Make sure we don't set both flags, since that could be mistaken for
+ // protected. Give OFTEN priority over RARELY.
+ if (use_flags & BO_USE_SW_READ_OFTEN) {
+ handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind,
+ VIRGL_BIND_MINIGBM_SW_READ_OFTEN);
+ } else {
+ handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind,
+ VIRGL_BIND_MINIGBM_SW_READ_RARELY);
+ }
+ if (use_flags & BO_USE_SW_WRITE_OFTEN) {
+ handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind,
+ VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN);
+ } else {
+ handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind,
+ VIRGL_BIND_MINIGBM_SW_WRITE_RARELY);
+ }
+ }
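/*
 * Editor's sketch (not part of this patch): the OFTEN-over-RARELY priority
 * above, with illustrative bit values standing in for the real BO_USE_* and
 * VIRGL_BIND_MINIGBM_* constants. Only one of the two read bind flags can
 * ever be set, so the pair is never mistaken for the protected encoding.
 */
#include <stdint.h>
#include <stdio.h>

#define SW_READ_OFTEN	 (1u << 0) /* illustrative */
#define SW_READ_RARELY	 (1u << 1) /* illustrative */
#define BIND_READ_OFTEN	 (1u << 8) /* illustrative */
#define BIND_READ_RARELY (1u << 9) /* illustrative */

int main(void)
{
	uint32_t use_flags = SW_READ_OFTEN | SW_READ_RARELY;
	uint32_t bind = (use_flags & SW_READ_OFTEN) ? BIND_READ_OFTEN
						    : BIND_READ_RARELY;
	printf("bind = 0x%x\n", bind); /* 0x100: only OFTEN survives */
	return 0;
}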
- // All host drivers only support linear camera buffer formats. If
- // that changes, this will need to be modified.
- handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);
+ handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_MINIGBM_CAMERA_WRITE);
+ handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_MINIGBM_CAMERA_READ);
+ handle_flag(&use_flags, BO_USE_HW_VIDEO_DECODER, &bind,
+ VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER);
+ handle_flag(&use_flags, BO_USE_HW_VIDEO_ENCODER, &bind,
+ VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER);
if (use_flags) {
drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
@@ -176,11 +467,30 @@ static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height
uint64_t use_flags)
{
int ret;
+ size_t i;
uint32_t stride;
- struct drm_virtgpu_resource_create res_create;
+ struct drm_virtgpu_resource_create res_create = { 0 };
+ struct bo_metadata emulated_metadata;
- stride = drv_stride_from_format(format, width, 0);
- drv_bo_from_format(bo, stride, height, format);
+ if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
+ stride = drv_stride_from_format(format, width, 0);
+ drv_bo_from_format(bo, stride, height, format);
+ } else {
+ assert(
+ virtio_gpu_supports_combination_through_emulation(bo->drv, format, use_flags));
+
+ virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);
+
+ format = emulated_metadata.format;
+ width = emulated_metadata.width;
+ height = emulated_metadata.height;
+ for (i = 0; i < emulated_metadata.num_planes; i++) {
+ bo->meta.strides[i] = emulated_metadata.strides[i];
+ bo->meta.offsets[i] = emulated_metadata.offsets[i];
+ bo->meta.sizes[i] = emulated_metadata.sizes[i];
+ }
+ bo->meta.total_size = emulated_metadata.total_size;
+ }
/*
* Setting the target is intended to ensure this resource gets bound as a 2D
@@ -189,7 +499,6 @@ static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height
* virglrenderer. When virglrenderer makes a resource, it will convert the target
* enum to the equivalent one in GL and then bind the resource to that target.
*/
- memset(&res_create, 0, sizeof(res_create));
res_create.target = PIPE_TEXTURE_2D;
res_create.format = translate_format(format);
@@ -219,11 +528,9 @@ static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
- struct drm_virtgpu_map gem_map;
+ struct drm_virtgpu_map gem_map = { 0 };
- memset(&gem_map, 0, sizeof(gem_map));
gem_map.handle = bo->handles[0].u32;
-
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
if (ret) {
drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
@@ -238,22 +545,11 @@ static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, u
static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
int ret;
- struct drm_virtgpu_get_caps cap_args;
- struct drm_virtgpu_getparam param_args;
- uint32_t can_query_v2 = 0;
-
- memset(&param_args, 0, sizeof(param_args));
- param_args.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
- param_args.value = (uint64_t)(uintptr_t)&can_query_v2;
- ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &param_args);
- if (ret) {
- drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
- }
+ struct drm_virtgpu_get_caps cap_args = { 0 };
*caps_is_v2 = 0;
- memset(&cap_args, 0, sizeof(cap_args));
cap_args.addr = (unsigned long long)caps;
- if (can_query_v2) {
+ if (features[feat_capset_fix].enabled) {
*caps_is_v2 = 1;
cap_args.cap_set_id = 2;
cap_args.size = sizeof(union virgl_caps);
@@ -280,28 +576,39 @@ static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *
return ret;
}
+static void virtio_gpu_init_features_and_caps(struct driver *drv)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
+ struct drm_virtgpu_getparam params = { 0 };
+
+ params.param = features[i].feature;
+ params.value = (uint64_t)(uintptr_t)&features[i].enabled;
+ int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
+ if (ret)
+			drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
+ }
+
+ if (features[feat_3d].enabled) {
+ virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
+ }
+
+ // Multi-planar formats are currently only supported in virglrenderer through gbm.
+ priv->host_gbm_enabled =
+ virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
+}
+
static int virtio_gpu_init(struct driver *drv)
{
- int ret;
struct virtio_gpu_priv *priv;
- struct drm_virtgpu_getparam args;
priv = calloc(1, sizeof(*priv));
drv->priv = priv;
- memset(&args, 0, sizeof(args));
- args.param = VIRTGPU_PARAM_3D_FEATURES;
- args.value = (uint64_t)(uintptr_t)&priv->has_3d;
- ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
- if (ret) {
- drv_log("virtio 3D acceleration is not available\n");
- /* Be paranoid */
- priv->has_3d = 0;
- }
-
- if (priv->has_3d) {
- virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
+ virtio_gpu_init_features_and_caps(drv);
+ if (features[feat_3d].enabled) {
/* This doesn't mean host can scanout everything, it just means host
* hypervisor can show it. */
virtio_gpu_add_combinations(drv, render_target_formats,
@@ -328,16 +635,43 @@ static int virtio_gpu_init(struct driver *drv)
&LINEAR_METADATA, BO_USE_TEXTURE_MASK);
virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
BO_USE_SW_MASK | BO_USE_LINEAR);
+ virtio_gpu_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
+ BO_USE_SW_MASK | BO_USE_LINEAR);
}
/* Android CTS tests require this. */
+ virtio_gpu_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
+ virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
+ BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
+
+ if (!priv->host_gbm_enabled) {
+ drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
+ BO_USE_HW_VIDEO_DECODER);
+ drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+ }
return drv_modify_linear_combinations(drv);
}
@@ -348,11 +682,95 @@ static void virtio_gpu_close(struct driver *drv)
drv->priv = NULL;
}
+static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
+{
+ int ret;
+ uint32_t stride;
+ uint32_t cur_blob_id;
+ uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
+ struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ if (bo->meta.use_flags & BO_USE_SW_MASK)
+ blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
+ if (bo->meta.use_flags & BO_USE_NON_GPU_HW)
+ blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;
+
+ cur_blob_id = atomic_fetch_add(&priv->next_blob_id, 1);
+ stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
+ drv_bo_from_format(bo, stride, bo->meta.height, bo->meta.format);
+ bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
+ bo->meta.tiling = blob_flags;
+
+ cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
+ cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
+ cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
+ cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
+ cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
+ cmd[VIRGL_PIPE_RES_CREATE_BIND] = use_flags_to_bind(bo->meta.use_flags);
+ cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;
+ cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = cur_blob_id;
+
+ drm_rc_blob.cmd = (uint64_t)&cmd;
+ drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
+ drm_rc_blob.size = bo->meta.total_size;
+ drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
+ drm_rc_blob.blob_flags = blob_flags;
+ drm_rc_blob.blob_id = cur_blob_id;
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
+ if (ret < 0) {
+		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
+ return -errno;
+ }
+
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
+ bo->handles[plane].u32 = drm_rc_blob.bo_handle;
+
+ return 0;
+}
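/*
 * Editor's note (not part of this patch): the command buffer above carries
 * one VIRGL_CCMD_PIPE_RESOURCE_CREATE command, so its byte size is
 * 4 * (payload dwords + 1 header dword). A sketch with an assumed payload
 * of 10 dwords; the CMD0 shape follows virgl_protocol.h, but treat the
 * exact values as illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define CMD0(cmd, obj, len) ((cmd) | ((obj) << 8) | ((len) << 16))
#define PIPE_RES_CREATE_SIZE 10 /* assumed payload dword count */

int main(void)
{
	uint32_t cmd[PIPE_RES_CREATE_SIZE + 1] = { 0 };

	cmd[0] = CMD0(0x1 /* assumed command id */, 0, PIPE_RES_CREATE_SIZE);
	printf("cmd_size = %zu bytes\n", sizeof(cmd)); /* 4 * 11 = 44 */
	return 0;
}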
+
+static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ // TODO(gurchetansingh): remove once all minigbm users are blob-safe
+#ifndef VIRTIO_GPU_NEXT
+ return false;
+#endif
+
+ // Only use blob when host gbm is available
+ if (!priv->host_gbm_enabled)
+ return false;
+
+ // Use regular resources if only the GPU needs efficient access
+ if (!(use_flags &
+ (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_LINEAR | BO_USE_NON_GPU_HW)))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_YVU420_ANDROID:
+ case DRM_FORMAT_R8:
+ // Formats with strictly defined strides are supported
+ return true;
+ case DRM_FORMAT_NV12:
+		// Buffer metadata isn't yet known at creation time, so NV12 buffers can't
+		// be properly mapped into the guest; allow blobs only when no CPU access
+		// is requested.
+ return (use_flags & BO_USE_SW_MASK) == 0;
+ default:
+ return false;
+ }
+}
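/*
 * Editor's sketch (not part of this patch): the decision pipeline of
 * should_use_blob() reduced to its three gates, with illustrative flags.
 * Blob resources are only worth it when host gbm is up, the CPU or non-GPU
 * hardware needs the buffer, and the format's layout is fully determined
 * by its stride.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USE_SW_OFTEN   (1u << 0) /* illustrative */
#define USE_NON_GPU_HW (1u << 1) /* illustrative */

static bool want_blob(bool host_gbm, uint32_t use_flags, bool strict_strides)
{
	if (!host_gbm)
		return false;
	if (!(use_flags & (USE_SW_OFTEN | USE_NON_GPU_HW)))
		return false; /* GPU-only: a regular resource suffices */
	return strict_strides; /* e.g. R8, YVU420_ANDROID */
}

int main(void)
{
	printf("%d %d\n", want_blob(true, USE_SW_OFTEN, true),
	       want_blob(false, USE_SW_OFTEN, true)); /* prints: 1 0 */
	return 0;
}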
+
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
- if (priv->has_3d)
+ if (features[feat_resource_blob].enabled && features[feat_host_visible].enabled &&
+ should_use_blob(bo->drv, format, use_flags))
+ return virtio_gpu_bo_create_blob(bo->drv, bo);
+
+ if (features[feat_3d].enabled)
return virtio_virgl_bo_create(bo, width, height, format, use_flags);
else
return virtio_dumb_bo_create(bo, width, height, format, use_flags);
@@ -360,8 +778,7 @@ static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height,
static int virtio_gpu_bo_destroy(struct bo *bo)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
- if (priv->has_3d)
+ if (features[feat_3d].enabled)
return drv_gem_bo_destroy(bo);
else
return drv_dumb_bo_destroy(bo);
@@ -369,8 +786,7 @@ static int virtio_gpu_bo_destroy(struct bo *bo)
static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
- if (priv->has_3d)
+ if (features[feat_3d].enabled)
return virtio_virgl_bo_map(bo, vma, plane, map_flags);
else
return drv_dumb_bo_map(bo, vma, plane, map_flags);
@@ -379,11 +795,13 @@ static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uin
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
int ret;
- struct drm_virtgpu_3d_transfer_from_host xfer;
+ size_t i;
+ struct drm_virtgpu_3d_transfer_from_host xfer = { 0 };
+ struct drm_virtgpu_3d_wait waitcmd = { 0 };
+ struct virtio_transfers_params xfer_params;
struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
- struct drm_virtgpu_3d_wait waitcmd;
- if (!priv->has_3d)
+ if (!features[feat_3d].enabled)
return 0;
// Invalidate is only necessary if the host writes to the buffer.
@@ -391,35 +809,66 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
return 0;
- memset(&xfer, 0, sizeof(xfer));
+ if (features[feat_resource_blob].enabled &&
+ (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return 0;
+
xfer.bo_handle = mapping->vma->handle;
- xfer.box.x = mapping->rect.x;
- xfer.box.y = mapping->rect.y;
- xfer.box.w = mapping->rect.width;
- xfer.box.h = mapping->rect.height;
- xfer.box.d = 1;
+
+ if (mapping->rect.x || mapping->rect.y) {
+ /*
+ * virglrenderer uses the box parameters and assumes that offset == 0 for planar
+ * images
+ */
+ if (bo->meta.num_planes == 1) {
+ xfer.offset =
+ (bo->meta.strides[0] * mapping->rect.y) +
+ drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
+ }
+ }
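+	/* Worked example (illustrative values): for a single-plane
+	 * XRGB8888 buffer with stride 256 (64 px * 4 bytes/px), a lock
+	 * rect at (x=3, y=2) yields offset 256 * 2 + 4 * 3 = 524 bytes
+	 * into the host resource. */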
if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
- // Unfortunately, the kernel doesn't actually pass the guest layer_stride and
- // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). For gbm
- // based resources, we can work around this by using the level field to pass
- // the stride to virglrenderer's gbm transfer code. However, we need to avoid
- // doing this for resources which don't rely on that transfer code, which is
- // resources with the BO_USE_RENDERING flag set.
+ // Unfortunately, the kernel doesn't actually pass the guest layer_stride
+ // and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
+ // For gbm based resources, we can work around this by using the level field
+ // to pass the stride to virglrenderer's gbm transfer code. However, we need
+ // to avoid doing this for resources which don't rely on that transfer code,
+ // which is resources with the BO_USE_RENDERING flag set.
// TODO(b/145993887): Send also stride when the patches are landed
- xfer.level = bo->meta.strides[0];
+ if (priv->host_gbm_enabled) {
+ xfer.level = bo->meta.strides[0];
+ }
}
- ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
- if (ret) {
- drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
- return -errno;
+ if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
+ bo->meta.use_flags)) {
+ xfer_params.xfers_needed = 1;
+ xfer_params.xfer_boxes[0] = mapping->rect;
+ } else {
+ assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
+ bo->meta.use_flags));
+
+ virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
+ }
+
+ for (i = 0; i < xfer_params.xfers_needed; i++) {
+ xfer.box.x = xfer_params.xfer_boxes[i].x;
+ xfer.box.y = xfer_params.xfer_boxes[i].y;
+ xfer.box.w = xfer_params.xfer_boxes[i].width;
+ xfer.box.h = xfer_params.xfer_boxes[i].height;
+ xfer.box.d = 1;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",
+ strerror(errno));
+ return -errno;
+ }
}
// The transfer needs to complete before invalidate returns so that any host changes
// are visible and to ensure the host doesn't overwrite subsequent guest changes.
// TODO(b/136733358): Support returning fences from transfers
- memset(&waitcmd, 0, sizeof(waitcmd));
waitcmd.handle = mapping->vma->handle;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
if (ret) {
@@ -433,33 +882,67 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
int ret;
- struct drm_virtgpu_3d_transfer_to_host xfer;
+ size_t i;
+ struct drm_virtgpu_3d_transfer_to_host xfer = { 0 };
+ struct drm_virtgpu_3d_wait waitcmd = { 0 };
+ struct virtio_transfers_params xfer_params;
struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
- struct drm_virtgpu_3d_wait waitcmd;
- if (!priv->has_3d)
+ if (!features[feat_3d].enabled)
return 0;
if (!(mapping->vma->map_flags & BO_MAP_WRITE))
return 0;
- memset(&xfer, 0, sizeof(xfer));
+ if (features[feat_resource_blob].enabled &&
+ (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return 0;
+
xfer.bo_handle = mapping->vma->handle;
- xfer.box.x = mapping->rect.x;
- xfer.box.y = mapping->rect.y;
- xfer.box.w = mapping->rect.width;
- xfer.box.h = mapping->rect.height;
- xfer.box.d = 1;
+
+ if (mapping->rect.x || mapping->rect.y) {
+ /*
+ * virglrenderer uses the box parameters and assumes that offset == 0 for planar
+ * images
+ */
+ if (bo->meta.num_planes == 1) {
+ xfer.offset =
+ (bo->meta.strides[0] * mapping->rect.y) +
+ drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
+ }
+ }
// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
// the level to work around this.
- xfer.level = bo->meta.strides[0];
+ if (priv->host_gbm_enabled) {
+ xfer.level = bo->meta.strides[0];
+ }
- ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
- if (ret) {
- drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
- return -errno;
+ if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
+ bo->meta.use_flags)) {
+ xfer_params.xfers_needed = 1;
+ xfer_params.xfer_boxes[0] = mapping->rect;
+ } else {
+ assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
+ bo->meta.use_flags));
+
+ virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
+ }
+
+ for (i = 0; i < xfer_params.xfers_needed; i++) {
+ xfer.box.x = xfer_params.xfer_boxes[i].x;
+ xfer.box.y = xfer_params.xfer_boxes[i].y;
+ xfer.box.w = xfer_params.xfer_boxes[i].width;
+ xfer.box.h = xfer_params.xfer_boxes[i].height;
+ xfer.box.d = 1;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",
+ strerror(errno));
+ return -errno;
+ }
}
// If the buffer is only accessed by the host GPU, then the flush is ordered
@@ -467,7 +950,6 @@ static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
// buffer, we need to wait for the transfer to complete for consistency.
// TODO(b/136733358): Support returning fences from transfers
if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
- memset(&waitcmd, 0, sizeof(waitcmd));
waitcmd.handle = mapping->vma->handle;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
@@ -482,8 +964,6 @@ static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
-
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
/* Camera subsystem requires NV12. */
@@ -496,10 +976,10 @@ static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, u
* All of our host drivers prefer NV12 as their flexible media format.
* If that changes, this will need to be modified.
*/
- if (priv->has_3d)
+ if (features[feat_3d].enabled)
return DRM_FORMAT_NV12;
else
- return DRM_FORMAT_YVU420;
+ return DRM_FORMAT_YVU420_ANDROID;
default:
return format;
}
@@ -509,13 +989,11 @@ static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLAN
uint32_t offsets[DRV_MAX_PLANES])
{
int ret;
- struct drm_virtgpu_resource_info res_info;
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ struct drm_virtgpu_resource_info res_info = { 0 };
- if (!priv->has_3d)
+ if (!features[feat_3d].enabled)
return 0;
- memset(&res_info, 0, sizeof(res_info));
res_info.bo_handle = bo->handles[0].u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
if (ret) {