author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-08-28 15:28:34 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-08-28 13:54:51 +0000
commit     2a19c63448c84c1805fb1a585c3651318bb86ca7 (patch)
tree       eb17888e8531aa6ee5e85721bd553b832a7e5156 /chromium/third_party/minigbm
parent     b014812705fc80bff0a5c120dfcef88f349816dc (diff)
download   qtwebengine-chromium-2a19c63448c84c1805fb1a585c3651318bb86ca7.tar.gz
BASELINE: Update Chromium to 69.0.3497.70
Change-Id: I2b7b56e4e7a8b26656930def0d4575dc32b900a0
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/minigbm')
-rw-r--r--  chromium/third_party/minigbm/BUILD.gn | 3
-rw-r--r--  chromium/third_party/minigbm/src/Android.mk | 69
-rw-r--r--  chromium/third_party/minigbm/src/Makefile | 5
-rw-r--r--  chromium/third_party/minigbm/src/amdgpu.c | 453
-rw-r--r--  chromium/third_party/minigbm/src/cros_gralloc/Makefile | 4
-rw-r--r--  chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc | 8
-rw-r--r--  chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc | 40
-rw-r--r--  chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc | 19
-rw-r--r--  chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h | 8
-rw-r--r--  chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc | 44
-rw-r--r--  chromium/third_party/minigbm/src/dri.c | 293
-rw-r--r--  chromium/third_party/minigbm/src/dri.h | 37
-rw-r--r--  chromium/third_party/minigbm/src/drv.c | 101
-rw-r--r--  chromium/third_party/minigbm/src/drv.h | 15
-rw-r--r--  chromium/third_party/minigbm/src/drv_priv.h | 6
-rw-r--r--  chromium/third_party/minigbm/src/exynos.c | 7
-rw-r--r--  chromium/third_party/minigbm/src/gbm.c | 7
-rw-r--r--  chromium/third_party/minigbm/src/gbm.h | 4
-rw-r--r--  chromium/third_party/minigbm/src/gbm_helpers.c | 2
-rw-r--r--  chromium/third_party/minigbm/src/helpers.c | 167
-rw-r--r--  chromium/third_party/minigbm/src/helpers.h | 1
-rw-r--r--  chromium/third_party/minigbm/src/i915.c | 165
-rw-r--r--  chromium/third_party/minigbm/src/mediatek.c | 5
-rw-r--r--  chromium/third_party/minigbm/src/msm.c | 2
-rw-r--r--  chromium/third_party/minigbm/src/rockchip.c | 7
-rw-r--r--  chromium/third_party/minigbm/src/tegra.c | 6
-rw-r--r--  chromium/third_party/minigbm/src/util.h | 3
-rw-r--r--  chromium/third_party/minigbm/src/vc4.c | 7
-rw-r--r--  chromium/third_party/minigbm/src/virtio_dumb.c | 71
-rw-r--r--  chromium/third_party/minigbm/src/virtio_gpu.c (renamed from chromium/third_party/minigbm/src/virtio_virgl.c) | 132
30 files changed, 972 insertions, 719 deletions
diff --git a/chromium/third_party/minigbm/BUILD.gn b/chromium/third_party/minigbm/BUILD.gn
index b8b212ef977..ab0ff8a60ab 100644
--- a/chromium/third_party/minigbm/BUILD.gn
+++ b/chromium/third_party/minigbm/BUILD.gn
@@ -87,8 +87,7 @@ if (!use_system_minigbm) {
"src/udl.c",
"src/vc4.c",
"src/vgem.c",
- "src/virtio_dumb.c",
- "src/virtio_virgl.c",
+ "src/virtio_gpu.c",
]
configs -= [ "//build/config/compiler:chromium_code" ]
diff --git a/chromium/third_party/minigbm/src/Android.mk b/chromium/third_party/minigbm/src/Android.mk
index 28176d258ac..9169cefba0c 100644
--- a/chromium/third_party/minigbm/src/Android.mk
+++ b/chromium/third_party/minigbm/src/Android.mk
@@ -6,20 +6,13 @@ ifeq ($(strip $(BOARD_USES_MINIGBM)), true)
MINIGBM_GRALLOC_MK := $(call my-dir)/Android.gralloc.mk
LOCAL_PATH := $(call my-dir)
intel_drivers := i915 i965
-include $(CLEAR_VARS)
-
-SUBDIRS := cros_gralloc
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libdrm
-LOCAL_SRC_FILES := \
+MINIGBM_SRC := \
amdgpu.c \
- cirrus.c \
drv.c \
evdi.c \
exynos.c \
+ helpers_array.c \
helpers.c \
i915.c \
marvell.c \
@@ -27,6 +20,7 @@ LOCAL_SRC_FILES := \
meson.c \
msm.c \
nouveau.c \
+ radeon.c \
rockchip.c \
tegra.c \
udl.c \
@@ -34,19 +28,39 @@ LOCAL_SRC_FILES := \
vgem.c \
virtio_gpu.c
-include $(MINIGBM_GRALLOC_MK)
-
-LOCAL_CPPFLAGS += -std=c++14 -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64
-LOCAL_CFLAGS += -Wall -Wsign-compare -Wpointer-arith \
- -Wcast-qual -Wcast-align \
- -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64
+MINIGBM_CPPFLAGS := -std=c++14
+MINIGBM_CFLAGS := \
+ -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64 \
+ -Wall -Wsign-compare -Wpointer-arith \
+ -Wcast-qual -Wcast-align \
+ -Wno-unused-parameter
ifneq ($(filter $(intel_drivers), $(BOARD_GPU_DRIVERS)),)
-LOCAL_CPPFLAGS += -DDRV_I915
-LOCAL_CFLAGS += -DDRV_I915
+MINIGBM_CPPFLAGS += -DDRV_I915
+MINIGBM_CFLAGS += -DDRV_I915
LOCAL_SHARED_LIBRARIES += libdrm_intel
endif
+ifneq ($(filter meson, $(BOARD_GPU_DRIVERS)),)
+MINIGBM_CPPFLAGS += -DDRV_MESON
+MINIGBM_CFLAGS += -DDRV_MESON
+endif
+
+include $(CLEAR_VARS)
+
+SUBDIRS := cros_gralloc
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libdrm
+
+LOCAL_SRC_FILES := $(MINIGBM_SRC)
+
+include $(MINIGBM_GRALLOC_MK)
+
+LOCAL_CFLAGS := $(MINIGBM_CFLAGS)
+LOCAL_CPPFLAGS := $(MINIGBM_CPPFLAGS)
+
LOCAL_MODULE := gralloc.$(TARGET_BOARD_PLATFORM)
LOCAL_MODULE_TAGS := optional
# The preferred path for vendor HALs is /vendor/lib/hw
@@ -54,6 +68,25 @@ LOCAL_PROPRIETARY_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := hw
LOCAL_MODULE_CLASS := SHARED_LIBRARIES
LOCAL_MODULE_SUFFIX := $(TARGET_SHLIB_SUFFIX)
+LOCAL_HEADER_LIBRARIES += \
+ libhardware_headers libnativebase_headers libsystem_headers
+LOCAL_SHARED_LIBRARIES += libnativewindow libsync liblog
+LOCAL_STATIC_LIBRARIES += libarect
+include $(BUILD_SHARED_LIBRARY)
+
+
+include $(CLEAR_VARS)
+LOCAL_SHARED_LIBRARIES := libcutils
+LOCAL_STATIC_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES += $(MINIGBM_SRC) gbm.c gbm_helpers.c
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+LOCAL_CFLAGS := $(MINIGBM_CFLAGS)
+LOCAL_CPPFLAGS := $(MINIGBM_CPPFLAGS)
+
+LOCAL_MODULE := libminigbm
+LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)
-#endif
+endif
diff --git a/chromium/third_party/minigbm/src/Makefile b/chromium/third_party/minigbm/src/Makefile
index 358cd7b8c78..35f92f21102 100644
--- a/chromium/third_party/minigbm/src/Makefile
+++ b/chromium/third_party/minigbm/src/Makefile
@@ -14,7 +14,7 @@ CFLAGS += -std=c99 -Wall -Wsign-compare -Wpointer-arith -Wcast-qual \
ifdef DRV_AMDGPU
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_amdgpu)
- LDLIBS += -lamdgpuaddr
+ LDLIBS += -ldrm_amdgpu -ldl
endif
ifdef DRV_EXYNOS
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_exynos)
@@ -31,6 +31,9 @@ endif
ifdef DRV_ROCKCHIP
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_rockchip)
endif
+ifdef DRV_VC4
+ CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_vc4)
+endif
CPPFLAGS += $(PC_CFLAGS)
LDLIBS += $(PC_LIBS)
diff --git a/chromium/third_party/minigbm/src/amdgpu.c b/chromium/third_party/minigbm/src/amdgpu.c
index f6c054763ae..83d300c402d 100644
--- a/chromium/third_party/minigbm/src/amdgpu.c
+++ b/chromium/third_party/minigbm/src/amdgpu.c
@@ -13,33 +13,25 @@
#include <sys/mman.h>
#include <xf86drm.h>
-#include "addrinterface.h"
+#include "dri.h"
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
-#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
-#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
+#ifdef __ANDROID__
+#define DRI_PATH "/vendor/lib/dri/radeonsi_dri.so"
+#else
+#define DRI_PATH "/usr/lib64/dri/radeonsi_dri.so"
#endif
-// clang-format off
-#define mmCC_RB_BACKEND_DISABLE 0x263d
-#define mmGB_TILE_MODE0 0x2644
-#define mmGB_MACROTILE_MODE0 0x2664
-#define mmGB_ADDR_CONFIG 0x263e
-#define mmMC_ARB_RAMCFG 0x9d8
-
-enum {
- FAMILY_UNKNOWN,
- FAMILY_SI,
- FAMILY_CI,
- FAMILY_KV,
- FAMILY_VI,
- FAMILY_CZ,
- FAMILY_PI,
- FAMILY_LAST,
+#define TILE_TYPE_LINEAR 0
+/* DRI backend decides tiling in this case. */
+#define TILE_TYPE_DRI 1
+
+struct amdgpu_priv {
+ struct dri_driver dri;
+ int drm_version;
};
-// clang-format on
const static uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
@@ -48,378 +40,181 @@ const static uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMA
const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8, DRM_FORMAT_NV21,
DRM_FORMAT_NV12, DRM_FORMAT_YVU420_ANDROID };
-static int amdgpu_set_metadata(int fd, uint32_t handle, struct amdgpu_bo_metadata *info)
-{
- struct drm_amdgpu_gem_metadata args = { 0 };
-
- if (!info)
- return -EINVAL;
-
- args.handle = handle;
- args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
- args.data.flags = info->flags;
- args.data.tiling_info = info->tiling_info;
-
- if (info->size_metadata > sizeof(args.data.data))
- return -EINVAL;
-
- if (info->size_metadata) {
- args.data.data_size_bytes = info->size_metadata;
- memcpy(args.data.data, info->umd_metadata, info->size_metadata);
- }
-
- return drmCommandWriteRead(fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args));
-}
-
-static int amdgpu_read_mm_regs(int fd, unsigned dword_offset, unsigned count, uint32_t instance,
- uint32_t flags, uint32_t *values)
-{
- struct drm_amdgpu_info request;
-
- memset(&request, 0, sizeof(request));
- request.return_pointer = (uintptr_t)values;
- request.return_size = count * sizeof(uint32_t);
- request.query = AMDGPU_INFO_READ_MMR_REG;
- request.read_mmr_reg.dword_offset = dword_offset;
- request.read_mmr_reg.count = count;
- request.read_mmr_reg.instance = instance;
- request.read_mmr_reg.flags = flags;
-
- return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
-}
-
-static int amdgpu_query_gpu(int fd, struct amdgpu_gpu_info *gpu_info)
-{
- int ret;
- uint32_t instance;
-
- if (!gpu_info)
- return -EINVAL;
-
- instance = AMDGPU_INFO_MMR_SH_INDEX_MASK << AMDGPU_INFO_MMR_SH_INDEX_SHIFT;
-
- ret = amdgpu_read_mm_regs(fd, mmCC_RB_BACKEND_DISABLE, 1, instance, 0,
- &gpu_info->backend_disable[0]);
- if (ret)
- return ret;
- /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
- gpu_info->backend_disable[0] = (gpu_info->backend_disable[0] >> 16) & 0xff;
-
- ret = amdgpu_read_mm_regs(fd, mmGB_TILE_MODE0, 32, 0xffffffff, 0, gpu_info->gb_tile_mode);
- if (ret)
- return ret;
-
- ret = amdgpu_read_mm_regs(fd, mmGB_MACROTILE_MODE0, 16, 0xffffffff, 0,
- gpu_info->gb_macro_tile_mode);
- if (ret)
- return ret;
-
- ret = amdgpu_read_mm_regs(fd, mmGB_ADDR_CONFIG, 1, 0xffffffff, 0, &gpu_info->gb_addr_cfg);
- if (ret)
- return ret;
-
- ret = amdgpu_read_mm_regs(fd, mmMC_ARB_RAMCFG, 1, 0xffffffff, 0, &gpu_info->mc_arb_ramcfg);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void *ADDR_API alloc_sys_mem(const ADDR_ALLOCSYSMEM_INPUT *in)
-{
- return malloc(in->sizeInBytes);
-}
-
-static ADDR_E_RETURNCODE ADDR_API free_sys_mem(const ADDR_FREESYSMEM_INPUT *in)
-{
- free(in->pVirtAddr);
- return ADDR_OK;
-}
-
-static int amdgpu_addrlib_compute(void *addrlib, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags, uint32_t *tiling_flags,
- ADDR_COMPUTE_SURFACE_INFO_OUTPUT *addr_out)
-{
- ADDR_COMPUTE_SURFACE_INFO_INPUT addr_surf_info_in = { 0 };
- ADDR_TILEINFO addr_tile_info = { 0 };
- ADDR_TILEINFO addr_tile_info_out = { 0 };
- uint32_t bits_per_pixel;
-
- addr_surf_info_in.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
-
- /* Set the requested tiling mode. */
- addr_surf_info_in.tileMode = ADDR_TM_2D_TILED_THIN1;
- if (use_flags &
- (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
- addr_surf_info_in.tileMode = ADDR_TM_LINEAR_ALIGNED;
- else if (width <= 16 || height <= 16)
- addr_surf_info_in.tileMode = ADDR_TM_1D_TILED_THIN1;
-
- bits_per_pixel = drv_stride_from_format(format, 1, 0) * 8;
- /* Bits per pixel should be calculated from format*/
- addr_surf_info_in.bpp = bits_per_pixel;
- addr_surf_info_in.numSamples = 1;
- addr_surf_info_in.width = width;
- addr_surf_info_in.height = height;
- addr_surf_info_in.numSlices = 1;
- addr_surf_info_in.pTileInfo = &addr_tile_info;
- addr_surf_info_in.tileIndex = -1;
-
- /* This disables incorrect calculations (hacks) in addrlib. */
- addr_surf_info_in.flags.noStencil = 1;
-
- /* Set the micro tile type. */
- if (use_flags & BO_USE_SCANOUT)
- addr_surf_info_in.tileType = ADDR_DISPLAYABLE;
- else
- addr_surf_info_in.tileType = ADDR_NON_DISPLAYABLE;
-
- addr_out->size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
- addr_out->pTileInfo = &addr_tile_info_out;
-
- if (AddrComputeSurfaceInfo(addrlib, &addr_surf_info_in, addr_out) != ADDR_OK)
- return -EINVAL;
-
- ADDR_CONVERT_TILEINFOTOHW_INPUT s_in = { 0 };
- ADDR_CONVERT_TILEINFOTOHW_OUTPUT s_out = { 0 };
- ADDR_TILEINFO s_tile_hw_info_out = { 0 };
-
- s_in.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_INPUT);
- /* Convert from real value to HW value */
- s_in.reverse = 0;
- s_in.pTileInfo = &addr_tile_info_out;
- s_in.tileIndex = -1;
-
- s_out.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_OUTPUT);
- s_out.pTileInfo = &s_tile_hw_info_out;
-
- if (AddrConvertTileInfoToHW(addrlib, &s_in, &s_out) != ADDR_OK)
- return -EINVAL;
-
- if (addr_out->tileMode >= ADDR_TM_2D_TILED_THIN1)
- /* 2D_TILED_THIN1 */
- *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4);
- else if (addr_out->tileMode >= ADDR_TM_1D_TILED_THIN1)
- /* 1D_TILED_THIN1 */
- *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2);
- else
- /* LINEAR_ALIGNED */
- *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1);
-
- *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, drv_log_base2(addr_tile_info_out.bankWidth));
- *tiling_flags |=
- AMDGPU_TILING_SET(BANK_HEIGHT, drv_log_base2(addr_tile_info_out.bankHeight));
- *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, s_tile_hw_info_out.tileSplitBytes);
- *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT,
- drv_log_base2(addr_tile_info_out.macroAspectRatio));
- *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, s_tile_hw_info_out.pipeConfig);
- *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, s_tile_hw_info_out.banks);
-
- return 0;
-}
-
-static void *amdgpu_addrlib_init(int fd)
-{
- int ret;
- ADDR_CREATE_INPUT addr_create_input = { 0 };
- ADDR_CREATE_OUTPUT addr_create_output = { 0 };
- ADDR_REGISTER_VALUE reg_value = { 0 };
- ADDR_CREATE_FLAGS create_flags = { { 0 } };
- ADDR_E_RETURNCODE addr_ret;
-
- addr_create_input.size = sizeof(ADDR_CREATE_INPUT);
- addr_create_output.size = sizeof(ADDR_CREATE_OUTPUT);
-
- struct amdgpu_gpu_info gpu_info = { 0 };
-
- ret = amdgpu_query_gpu(fd, &gpu_info);
-
- if (ret) {
- fprintf(stderr, "[%s]failed with error =%d\n", __func__, ret);
- return NULL;
- }
-
- reg_value.noOfBanks = gpu_info.mc_arb_ramcfg & 0x3;
- reg_value.gbAddrConfig = gpu_info.gb_addr_cfg;
- reg_value.noOfRanks = (gpu_info.mc_arb_ramcfg & 0x4) >> 2;
-
- reg_value.backendDisables = gpu_info.backend_disable[0];
- reg_value.pTileConfig = gpu_info.gb_tile_mode;
- reg_value.noOfEntries = sizeof(gpu_info.gb_tile_mode) / sizeof(gpu_info.gb_tile_mode[0]);
- reg_value.pMacroTileConfig = gpu_info.gb_macro_tile_mode;
- reg_value.noOfMacroEntries =
- sizeof(gpu_info.gb_macro_tile_mode) / sizeof(gpu_info.gb_macro_tile_mode[0]);
- create_flags.value = 0;
- create_flags.useTileIndex = 1;
-
- addr_create_input.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
-
- addr_create_input.chipFamily = FAMILY_CZ;
- addr_create_input.createFlags = create_flags;
- addr_create_input.callbacks.allocSysMem = alloc_sys_mem;
- addr_create_input.callbacks.freeSysMem = free_sys_mem;
- addr_create_input.callbacks.debugPrint = 0;
- addr_create_input.regValue = reg_value;
-
- addr_ret = AddrCreate(&addr_create_input, &addr_create_output);
-
- if (addr_ret != ADDR_OK) {
- fprintf(stderr, "[%s]failed error =%d\n", __func__, addr_ret);
- return NULL;
- }
-
- return addr_create_output.hLib;
-}
-
static int amdgpu_init(struct driver *drv)
{
- void *addrlib;
+ struct amdgpu_priv *priv;
+ drmVersionPtr drm_version;
struct format_metadata metadata;
uint64_t use_flags = BO_USE_RENDER_MASK;
- addrlib = amdgpu_addrlib_init(drv_get_fd(drv));
- if (!addrlib)
- return -1;
+ priv = calloc(1, sizeof(struct amdgpu_priv));
+ if (!priv)
+ return -ENOMEM;
- drv->priv = addrlib;
+ drm_version = drmGetVersion(drv_get_fd(drv));
+ if (!drm_version) {
+ free(priv);
+ return -ENODEV;
+ }
- drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+ priv->drm_version = drm_version->version_minor;
+ drmFreeVersion(drm_version);
- /* YUV format for camera */
- drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
- /*
- * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
- * from camera.
- */
- drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ drv->priv = priv;
- drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA, BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_SCANOUT);
+ if (dri_init(drv, DRI_PATH, "radeonsi")) {
+ free(priv);
+ drv->priv = NULL;
+ return -ENODEV;
+ }
- metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
- metadata.priority = 2;
+ metadata.tiling = TILE_TYPE_LINEAR;
+ metadata.priority = 1;
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
&metadata, use_flags);
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &metadata, BO_USE_TEXTURE_MASK);
+
+ /* Linear formats supported by display. */
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
- metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
- metadata.priority = 3;
- metadata.modifier = DRM_FORMAT_MOD_LINEAR;
+ /* YUV formats for camera and display. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT);
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
+ drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);
+
+ /*
+ * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
+ * from camera.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ /*
+ * The following formats will be allocated by the DRI backend and may be potentially tiled.
+ * Since format modifier support hasn't been implemented fully yet, it's not
+ * possible to enumerate the different types of buffers (like i915 can).
+ */
+ use_flags &= ~BO_USE_RENDERSCRIPT;
use_flags &= ~BO_USE_SW_WRITE_OFTEN;
use_flags &= ~BO_USE_SW_READ_OFTEN;
use_flags &= ~BO_USE_LINEAR;
- metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
- metadata.priority = 4;
+ metadata.tiling = TILE_TYPE_DRI;
+ metadata.priority = 2;
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
&metadata, use_flags);
- drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
+ /* Potentially tiled formats supported by display. */
+ drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
-
- metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
- metadata.priority = 5;
-
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
-
return 0;
}
static void amdgpu_close(struct driver *drv)
{
- AddrDestroy(drv->priv);
+ dri_close(drv);
+ free(drv->priv);
drv->priv = NULL;
}
-static int amdgpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags)
{
- void *addrlib = bo->drv->priv;
- union drm_amdgpu_gem_create gem_create;
- struct amdgpu_bo_metadata metadata = { 0 };
- ADDR_COMPUTE_SURFACE_INFO_OUTPUT addr_out = { 0 };
- uint32_t tiling_flags = 0;
- size_t plane;
int ret;
+ uint32_t plane, stride;
+ struct combination *combo;
+ union drm_amdgpu_gem_create gem_create;
+ struct amdgpu_priv *priv = bo->drv->priv;
- if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21) {
- drv_bo_from_format(bo, ALIGN(width, 64), height, format);
- } else if (format == DRM_FORMAT_YVU420_ANDROID) {
- drv_bo_from_format(bo, ALIGN(width, 128), height, format);
- } else {
- if (amdgpu_addrlib_compute(addrlib, width, height, format, use_flags, &tiling_flags,
- &addr_out) < 0)
- return -EINVAL;
-
- bo->tiling = tiling_flags;
- /* RGB has 1 plane only */
- bo->offsets[0] = 0;
- bo->total_size = bo->sizes[0] = addr_out.surfSize;
- bo->strides[0] = addr_out.pixelPitch * DIV_ROUND_UP(addr_out.pixelBits, 8);
- }
+ combo = drv_get_combination(bo->drv, format, use_flags);
+ if (!combo)
+ return -EINVAL;
- memset(&gem_create, 0, sizeof(gem_create));
+ if (combo->metadata.tiling == TILE_TYPE_DRI)
+ return dri_bo_create(bo, width, height, format, use_flags);
- gem_create.in.bo_size = bo->total_size;
- gem_create.in.alignment = addr_out.baseAlign;
- /* Set the placement. */
+ stride = drv_stride_from_format(format, width, 0);
+ if (format == DRM_FORMAT_YVU420_ANDROID)
+ stride = ALIGN(stride, 128);
+ else
+ stride = ALIGN(stride, 64);
+ drv_bo_from_format(bo, stride, height, format);
+
+ memset(&gem_create, 0, sizeof(gem_create));
+ gem_create.in.bo_size = bo->total_size;
+ gem_create.in.alignment = 256;
gem_create.in.domain_flags = 0;
+
if (use_flags & (BO_USE_LINEAR | BO_USE_SW))
gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (use_flags & (BO_USE_SCANOUT | BO_USE_CURSOR)) {
- /* TODO(dbehr) do not use VRAM after we enable display VM */
- gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
- } else {
- gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
- if (!(use_flags & BO_USE_SW_READ_OFTEN))
- gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+ if (!(use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SCANOUT)))
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
+ /* If drm_version >= 21 everything exposes explicit synchronization primitives
+ and chromeos/arc++ will use them. Disable implicit synchronization. */
+ if (priv->drm_version >= 21) {
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
/* Allocate the buffer with the preferred heap. */
ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
sizeof(gem_create));
-
if (ret < 0)
return ret;
- metadata.tiling_info = tiling_flags;
-
for (plane = 0; plane < bo->num_planes; plane++)
bo->handles[plane].u32 = gem_create.out.handle;
- ret = amdgpu_set_metadata(drv_get_fd(bo->drv), bo->handles[0].u32, &metadata);
+ return 0;
+}
+
+static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
+{
+ struct combination *combo;
+ combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ if (combo->metadata.tiling == TILE_TYPE_DRI)
+ return dri_bo_import(bo, data);
+ else
+ return drv_prime_bo_import(bo, data);
+}
- return ret;
+static int amdgpu_destroy_bo(struct bo *bo)
+{
+ if (bo->priv)
+ return dri_bo_destroy(bo);
+ else
+ return drv_gem_bo_destroy(bo);
}
-static void *amdgpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
union drm_amdgpu_gem_mmap gem_map;
+ if (bo->priv)
+ return dri_bo_map(bo, vma, plane, map_flags);
+
memset(&gem_map, 0, sizeof(gem_map));
gem_map.in.handle = bo->handles[plane].u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
+ drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
return MAP_FAILED;
}
@@ -429,6 +224,14 @@ static void *amdgpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_
gem_map.out.addr_ptr);
}
+static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
+{
+ if (bo->priv)
+ return dri_bo_unmap(bo, vma);
+ else
+ return munmap(vma->addr, vma->length);
+}
+
static uint32_t amdgpu_resolve_format(uint32_t format, uint64_t use_flags)
{
switch (format) {
@@ -449,11 +252,11 @@ const struct backend backend_amdgpu = {
.name = "amdgpu",
.init = amdgpu_init,
.close = amdgpu_close,
- .bo_create = amdgpu_bo_create,
- .bo_destroy = drv_gem_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = amdgpu_bo_map,
- .bo_unmap = drv_bo_munmap,
+ .bo_create = amdgpu_create_bo,
+ .bo_destroy = amdgpu_destroy_bo,
+ .bo_import = amdgpu_import_bo,
+ .bo_map = amdgpu_map_bo,
+ .bo_unmap = amdgpu_unmap_bo,
.resolve_format = amdgpu_resolve_format,
};
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/Makefile b/chromium/third_party/minigbm/src/cros_gralloc/Makefile
index 1583a6fb427..17e884fb210 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/Makefile
+++ b/chromium/third_party/minigbm/src/cros_gralloc/Makefile
@@ -21,10 +21,6 @@ CXXFLAGS += -std=c++14
CFLAGS += -std=c99
LIBS += -shared -lcutils -lhardware -lsync $(LIBDRM_LIBS)
-ifdef DRV_AMDGPU
- LIBS += -lamdgpuaddr
-endif
-
OBJS = $(foreach source, $(SOURCES), $(addsuffix .o, $(basename $(source))))
OBJECTS = $(addprefix $(TARGET_DIR), $(notdir $(OBJS)))
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc
index 47a13a2ddb9..0301af12d32 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc
@@ -56,7 +56,7 @@ int32_t cros_gralloc_buffer::lock(const struct rectangle *rect, uint32_t map_fla
* just use the first kernel buffer.
*/
if (drv_num_buffers_per_bo(bo_) != 1) {
- cros_gralloc_error("Can only support one buffer per bo.");
+ drv_log("Can only support one buffer per bo.\n");
return -EINVAL;
}
@@ -69,7 +69,7 @@ int32_t cros_gralloc_buffer::lock(const struct rectangle *rect, uint32_t map_fla
}
if (vaddr == MAP_FAILED) {
- cros_gralloc_error("Mapping failed.");
+ drv_log("Mapping failed.\n");
return -EFAULT;
}
}
@@ -84,13 +84,13 @@ int32_t cros_gralloc_buffer::lock(const struct rectangle *rect, uint32_t map_fla
int32_t cros_gralloc_buffer::unlock()
{
if (lockcount_ <= 0) {
- cros_gralloc_error("Buffer was not locked.");
+ drv_log("Buffer was not locked.\n");
return -EINVAL;
}
if (!--lockcount_) {
if (lock_data_[0]) {
- drv_bo_flush(bo_, lock_data_[0]);
+ drv_bo_flush_or_unmap(bo_, lock_data_[0]);
lock_data_[0] = nullptr;
}
}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc
index fec4aba30ec..31282bc9510 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc
@@ -90,15 +90,26 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto
uint64_t mod;
size_t num_planes;
uint32_t resolved_format;
+ uint32_t bytes_per_pixel;
+ uint64_t use_flags;
struct bo *bo;
struct cros_gralloc_handle *hnd;
resolved_format = drv_resolve_format(drv_, descriptor->drm_format, descriptor->use_flags);
+ use_flags = descriptor->use_flags;
+ /*
+ * TODO(b/79682290): ARC++ assumes NV12 is always linear and doesn't
+ * send modifiers across Wayland protocol, so we or in the
+ * BO_USE_LINEAR flag here. We need to fix ARC++ to allocate and work
+ * with tiled buffers.
+ */
+ if (resolved_format == DRM_FORMAT_NV12)
+ use_flags |= BO_USE_LINEAR;
bo = drv_bo_create(drv_, descriptor->width, descriptor->height, resolved_format,
- descriptor->use_flags);
+ use_flags);
if (!bo) {
- cros_gralloc_error("Failed to create bo.");
+ drv_log("Failed to create bo.\n");
return -ENOMEM;
}
@@ -109,7 +120,7 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto
*/
if (drv_num_buffers_per_bo(bo) != 1) {
drv_bo_destroy(bo);
- cros_gralloc_error("Can only support one buffer per bo.");
+ drv_log("Can only support one buffer per bo.\n");
return -EINVAL;
}
@@ -135,7 +146,8 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto
hnd->format = drv_bo_get_format(bo);
hnd->use_flags[0] = static_cast<uint32_t>(descriptor->use_flags >> 32);
hnd->use_flags[1] = static_cast<uint32_t>(descriptor->use_flags);
- hnd->pixel_stride = drv_bo_get_stride_in_pixels(bo);
+ bytes_per_pixel = drv_bytes_per_pixel_from_format(hnd->format, 0);
+ hnd->pixel_stride = DIV_ROUND_UP(hnd->strides[0], bytes_per_pixel);
hnd->magic = cros_gralloc_magic;
hnd->droid_format = descriptor->droid_format;
hnd->usage = descriptor->producer_usage;
@@ -157,7 +169,7 @@ int32_t cros_gralloc_driver::retain(buffer_handle_t handle)
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
- cros_gralloc_error("Invalid handle.");
+ drv_log("Invalid handle.\n");
return -EINVAL;
}
@@ -169,7 +181,7 @@ int32_t cros_gralloc_driver::retain(buffer_handle_t handle)
}
if (drmPrimeFDToHandle(drv_get_fd(drv_), hnd->fds[0], &id)) {
- cros_gralloc_error("drmPrimeFDToHandle failed.");
+ drv_log("drmPrimeFDToHandle failed.\n");
return -errno;
}
@@ -214,13 +226,13 @@ int32_t cros_gralloc_driver::release(buffer_handle_t handle)
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
- cros_gralloc_error("Invalid handle.");
+ drv_log("Invalid handle.\n");
return -EINVAL;
}
auto buffer = get_buffer(hnd);
if (!buffer) {
- cros_gralloc_error("Invalid Reference.");
+ drv_log("Invalid Reference.\n");
return -EINVAL;
}
@@ -246,13 +258,13 @@ int32_t cros_gralloc_driver::lock(buffer_handle_t handle, int32_t acquire_fence,
std::lock_guard<std::mutex> lock(mutex_);
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
- cros_gralloc_error("Invalid handle.");
+ drv_log("Invalid handle.\n");
return -EINVAL;
}
auto buffer = get_buffer(hnd);
if (!buffer) {
- cros_gralloc_error("Invalid Reference.");
+ drv_log("Invalid Reference.\n");
return -EINVAL;
}
@@ -265,13 +277,13 @@ int32_t cros_gralloc_driver::unlock(buffer_handle_t handle, int32_t *release_fen
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
- cros_gralloc_error("Invalid handle.");
+ drv_log("Invalid handle.\n");
return -EINVAL;
}
auto buffer = get_buffer(hnd);
if (!buffer) {
- cros_gralloc_error("Invalid Reference.");
+ drv_log("Invalid Reference.\n");
return -EINVAL;
}
@@ -291,13 +303,13 @@ int32_t cros_gralloc_driver::get_backing_store(buffer_handle_t handle, uint64_t
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
- cros_gralloc_error("Invalid handle.");
+ drv_log("Invalid handle.\n");
return -EINVAL;
}
auto buffer = get_buffer(hnd);
if (!buffer) {
- cros_gralloc_error("Invalid Reference.");
+ drv_log("Invalid Reference.\n");
return -EINVAL;
}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc
index e662084bd9f..c09c2b515a6 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc
@@ -6,8 +6,6 @@
#include "cros_gralloc_helpers.h"
-#include <cstdlib>
-#include <cutils/log.h>
#include <sync/sync.h>
uint32_t cros_gralloc_convert_format(int format)
@@ -66,30 +64,19 @@ int32_t cros_gralloc_sync_wait(int32_t acquire_fence)
*/
int err = sync_wait(acquire_fence, 1000);
if (err < 0) {
- cros_gralloc_error("Timed out on sync wait, err = %s", strerror(errno));
+ drv_log("Timed out on sync wait, err = %s\n", strerror(errno));
err = sync_wait(acquire_fence, -1);
if (err < 0) {
- cros_gralloc_error("sync wait error = %s", strerror(errno));
+ drv_log("sync wait error = %s\n", strerror(errno));
return -errno;
}
}
err = close(acquire_fence);
if (err) {
- cros_gralloc_error("Unable to close fence fd, err = %s", strerror(errno));
+ drv_log("Unable to close fence fd, err = %s\n", strerror(errno));
return -errno;
}
return 0;
}
-
-void cros_gralloc_log(const char *prefix, const char *file, int line, const char *format, ...)
-{
- char buf[50];
- snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);
-
- va_list args;
- va_start(args, format);
- __android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
- va_end(args);
-}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h
index cf90ec8cb1a..a55eebcada4 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.h
@@ -24,12 +24,4 @@ cros_gralloc_handle_t cros_gralloc_convert_handle(buffer_handle_t handle);
int32_t cros_gralloc_sync_wait(int32_t acquire_fence);
-__attribute__((format(printf, 4, 5))) void cros_gralloc_log(const char *prefix, const char *file,
- int line, const char *format, ...);
-
-#define cros_gralloc_error(...) \
- do { \
- cros_gralloc_log("CROS_GRALLOC_ERROR", __FILE__, __LINE__, __VA_ARGS__); \
- } while (0)
-
#endif
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc
index 50cd1af99e9..df1f62cb86c 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc
@@ -97,7 +97,7 @@ static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usa
int32_t ret;
bool supported;
struct cros_gralloc_buffer_descriptor descriptor;
- auto mod = (struct gralloc0_module *)dev->common.module;
+ auto mod = (struct gralloc0_module const *)dev->common.module;
descriptor.width = w;
descriptor.height = h;
@@ -113,10 +113,10 @@ static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usa
}
if (!supported) {
- cros_gralloc_error("Unsupported combination -- HAL format: %u, HAL usage: %u, "
- "drv_format: %4.4s, use_flags: %llu",
- format, usage, reinterpret_cast<char *>(&descriptor.drm_format),
- static_cast<unsigned long long>(descriptor.use_flags));
+ drv_log("Unsupported combination -- HAL format: %u, HAL usage: %u, "
+ "drv_format: %4.4s, use_flags: %llu\n",
+ format, usage, reinterpret_cast<char *>(&descriptor.drm_format),
+ static_cast<unsigned long long>(descriptor.use_flags));
return -EINVAL;
}
@@ -132,7 +132,7 @@ static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usa
static int gralloc0_free(alloc_device_t *dev, buffer_handle_t handle)
{
- auto mod = (struct gralloc0_module *)dev->common.module;
+ auto mod = (struct gralloc0_module const *)dev->common.module;
return mod->driver->release(handle);
}
@@ -151,7 +151,7 @@ static int gralloc0_init(struct gralloc0_module *mod, bool initialize_alloc)
mod->driver = std::make_unique<cros_gralloc_driver>();
if (mod->driver->init()) {
- cros_gralloc_error("Failed to initialize driver.");
+ drv_log("Failed to initialize driver.\n");
return -ENODEV;
}
@@ -171,7 +171,8 @@ static int gralloc0_init(struct gralloc0_module *mod, bool initialize_alloc)
static int gralloc0_open(const struct hw_module_t *mod, const char *name, struct hw_device_t **dev)
{
- auto module = (struct gralloc0_module *)mod;
+ auto const_module = reinterpret_cast<const struct gralloc0_module *>(mod);
+ auto module = const_cast<struct gralloc0_module *>(const_module);
if (module->initialized) {
*dev = &module->alloc->common;
@@ -179,7 +180,7 @@ static int gralloc0_open(const struct hw_module_t *mod, const char *name, struct
}
if (strcmp(name, GRALLOC_HARDWARE_GPU0)) {
- cros_gralloc_error("Incorrect device name - %s.", name);
+ drv_log("Incorrect device name - %s.\n", name);
return -EINVAL;
}
@@ -192,7 +193,8 @@ static int gralloc0_open(const struct hw_module_t *mod, const char *name, struct
static int gralloc0_register_buffer(struct gralloc_module_t const *module, buffer_handle_t handle)
{
- auto mod = (struct gralloc0_module *)module;
+ auto const_module = reinterpret_cast<const struct gralloc0_module *>(module);
+ auto mod = const_cast<struct gralloc0_module *>(const_module);
if (!mod->initialized)
if (gralloc0_init(mod, false))
@@ -203,7 +205,7 @@ static int gralloc0_register_buffer(struct gralloc_module_t const *module, buffe
static int gralloc0_unregister_buffer(struct gralloc_module_t const *module, buffer_handle_t handle)
{
- auto mod = (struct gralloc0_module *)module;
+ auto mod = (struct gralloc0_module const *)module;
return mod->driver->release(handle);
}
@@ -216,7 +218,7 @@ static int gralloc0_lock(struct gralloc_module_t const *module, buffer_handle_t
static int gralloc0_unlock(struct gralloc_module_t const *module, buffer_handle_t handle)
{
int32_t fence_fd, ret;
- auto mod = (struct gralloc0_module *)module;
+ auto mod = (struct gralloc0_module const *)module;
ret = mod->driver->unlock(handle, &fence_fd);
if (ret)
return ret;
@@ -235,7 +237,7 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
uint64_t *out_store;
buffer_handle_t handle;
uint32_t *out_width, *out_height, *out_stride;
- auto mod = (struct gralloc0_module *)module;
+ auto mod = (struct gralloc0_module const *)module;
switch (op) {
case GRALLOC_DRM_GET_STRIDE:
@@ -253,7 +255,7 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
handle = va_arg(args, buffer_handle_t);
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
- cros_gralloc_error("Invalid handle.");
+ drv_log("Invalid handle.\n");
return -EINVAL;
}
@@ -297,7 +299,7 @@ static int gralloc0_lock_async(struct gralloc_module_t const *module, buffer_han
int32_t ret;
uint32_t map_flags;
uint8_t *addr[DRV_MAX_PLANES];
- auto mod = (struct gralloc0_module *)module;
+ auto mod = (struct gralloc0_module const *)module;
struct rectangle rect = { .x = static_cast<uint32_t>(l),
.y = static_cast<uint32_t>(t),
.width = static_cast<uint32_t>(w),
@@ -305,12 +307,12 @@ static int gralloc0_lock_async(struct gralloc_module_t const *module, buffer_han
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
- cros_gralloc_error("Invalid handle.");
+ drv_log("Invalid handle.\n");
return -EINVAL;
}
if (hnd->droid_format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
- cros_gralloc_error("HAL_PIXEL_FORMAT_YCbCr_*_888 format not compatible.");
+ drv_log("HAL_PIXEL_FORMAT_YCbCr_*_888 format not compatible.\n");
return -EINVAL;
}
@@ -328,7 +330,7 @@ static int gralloc0_lock_async(struct gralloc_module_t const *module, buffer_han
static int gralloc0_unlock_async(struct gralloc_module_t const *module, buffer_handle_t handle,
int *fence_fd)
{
- auto mod = (struct gralloc0_module *)module;
+ auto mod = (struct gralloc0_module const *)module;
return mod->driver->unlock(handle, fence_fd);
}
@@ -339,7 +341,7 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
int32_t ret;
uint32_t map_flags;
uint8_t *addr[DRV_MAX_PLANES] = { nullptr, nullptr, nullptr, nullptr };
- auto mod = (struct gralloc0_module *)module;
+ auto mod = (struct gralloc0_module const *)module;
struct rectangle rect = { .x = static_cast<uint32_t>(l),
.y = static_cast<uint32_t>(t),
.width = static_cast<uint32_t>(w),
@@ -347,14 +349,14 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
- cros_gralloc_error("Invalid handle.");
+ drv_log("Invalid handle.\n");
return -EINVAL;
}
if ((hnd->droid_format != HAL_PIXEL_FORMAT_YCbCr_420_888) &&
(hnd->droid_format != HAL_PIXEL_FORMAT_YV12) &&
(hnd->droid_format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)) {
- cros_gralloc_error("Non-YUV format not compatible.");
+ drv_log("Non-YUV format not compatible.\n");
return -EINVAL;
}
diff --git a/chromium/third_party/minigbm/src/dri.c b/chromium/third_party/minigbm/src/dri.c
new file mode 100644
index 00000000000..ae491bb01d5
--- /dev/null
+++ b/chromium/third_party/minigbm/src/dri.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2017 Advanced Micro Devices. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_AMDGPU
+
+#include <assert.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <xf86drm.h>
+
+#include "dri.h"
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const struct {
+ uint32_t drm_format;
+ int dri_image_format;
+} drm_to_dri_image_formats[] = {
+ { DRM_FORMAT_R8, __DRI_IMAGE_FORMAT_R8 },
+ { DRM_FORMAT_GR88, __DRI_IMAGE_FORMAT_GR88 },
+ { DRM_FORMAT_RGB565, __DRI_IMAGE_FORMAT_RGB565 },
+ { DRM_FORMAT_XRGB8888, __DRI_IMAGE_FORMAT_XRGB8888 },
+ { DRM_FORMAT_ARGB8888, __DRI_IMAGE_FORMAT_ARGB8888 },
+ { DRM_FORMAT_XBGR8888, __DRI_IMAGE_FORMAT_XBGR8888 },
+ { DRM_FORMAT_ABGR8888, __DRI_IMAGE_FORMAT_ABGR8888 },
+ { DRM_FORMAT_XRGB2101010, __DRI_IMAGE_FORMAT_XRGB2101010 },
+ { DRM_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ARGB2101010 },
+};
+
+static int drm_format_to_dri_format(uint32_t drm_format)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(drm_to_dri_image_formats); i++) {
+ if (drm_to_dri_image_formats[i].drm_format == drm_format)
+ return drm_to_dri_image_formats[i].dri_image_format;
+ }
+
+ return 0;
+}
+
+static bool lookup_extension(const __DRIextension *const *extensions, const char *name,
+ int min_version, const __DRIextension **dst)
+{
+ while (*extensions) {
+ if ((*extensions)->name && !strcmp((*extensions)->name, name) &&
+ (*extensions)->version >= min_version) {
+ *dst = *extensions;
+ return true;
+ }
+
+ extensions++;
+ }
+
+ return false;
+}
+
+/*
+ * The DRI GEM namespace may be different from the minigbm's driver GEM namespace. We have
+ * to import into minigbm.
+ */
+static int import_into_minigbm(struct dri_driver *dri, struct bo *bo)
+{
+ uint32_t handle;
+ int prime_fd, ret;
+
+ if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_FD, &prime_fd))
+ return -errno;
+
+ ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle);
+ if (ret) {
+ drv_log("drmPrimeFDToHandle failed with %s\n", strerror(errno));
+ return ret;
+ }
+
+ bo->handles[0].u32 = handle;
+ close(prime_fd);
+ return 0;
+}
+
+/*
+ * The caller is responsible for setting drv->priv to a structure that derives from dri_driver.
+ */
+int dri_init(struct driver *drv, const char *dri_so_path, const char *driver_suffix)
+{
+ char fname[128];
+ const __DRIextension **(*get_extensions)();
+ const __DRIextension *loader_extensions[] = { NULL };
+
+ struct dri_driver *dri = drv->priv;
+ dri->driver_handle = dlopen(dri_so_path, RTLD_NOW | RTLD_GLOBAL);
+ if (!dri->driver_handle)
+ return -ENODEV;
+
+ snprintf(fname, sizeof(fname), __DRI_DRIVER_GET_EXTENSIONS "_%s", driver_suffix);
+ get_extensions = dlsym(dri->driver_handle, fname);
+ if (!get_extensions)
+ goto free_handle;
+
+ dri->extensions = get_extensions();
+ if (!dri->extensions)
+ goto free_handle;
+
+ if (!lookup_extension(dri->extensions, __DRI_CORE, 2,
+ (const __DRIextension **)&dri->core_extension))
+ goto free_handle;
+
+ /* Version 4 for createNewScreen2 */
+ if (!lookup_extension(dri->extensions, __DRI_DRI2, 4,
+ (const __DRIextension **)&dri->dri2_extension))
+ goto free_handle;
+
+ dri->device = dri->dri2_extension->createNewScreen2(0, drv_get_fd(drv), loader_extensions,
+ dri->extensions, &dri->configs, NULL);
+ if (!dri->device)
+ goto free_handle;
+
+ dri->context =
+ dri->dri2_extension->createNewContext(dri->device, *dri->configs, NULL, NULL);
+
+ if (!dri->context)
+ goto free_screen;
+
+ if (!lookup_extension(dri->core_extension->getExtensions(dri->device), __DRI_IMAGE, 12,
+ (const __DRIextension **)&dri->image_extension))
+ goto free_context;
+
+ if (!lookup_extension(dri->core_extension->getExtensions(dri->device), __DRI2_FLUSH, 4,
+ (const __DRIextension **)&dri->flush_extension))
+ goto free_context;
+
+ return 0;
+
+free_context:
+ dri->core_extension->destroyContext(dri->context);
+free_screen:
+ dri->core_extension->destroyScreen(dri->device);
+free_handle:
+ dlclose(dri->driver_handle);
+ dri->driver_handle = NULL;
+ return -ENODEV;
+}
+
+/*
+ * The caller is responsible for freeing drv->priv.
+ */
+void dri_close(struct driver *drv)
+{
+ struct dri_driver *dri = drv->priv;
+
+ dri->core_extension->destroyContext(dri->context);
+ dri->core_extension->destroyScreen(dri->device);
+ dlclose(dri->driver_handle);
+ dri->driver_handle = NULL;
+}
+
+int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ unsigned int dri_use;
+ int ret, dri_format, stride, offset;
+ struct dri_driver *dri = bo->drv->priv;
+
+ assert(bo->num_planes == 1);
+ dri_format = drm_format_to_dri_format(format);
+
+ /* Gallium drivers require shared to get the handle and stride. */
+ dri_use = __DRI_IMAGE_USE_SHARE;
+ if (use_flags & BO_USE_SCANOUT)
+ dri_use |= __DRI_IMAGE_USE_SCANOUT;
+ if (use_flags & BO_USE_CURSOR)
+ dri_use |= __DRI_IMAGE_USE_CURSOR;
+ if (use_flags & BO_USE_LINEAR)
+ dri_use |= __DRI_IMAGE_USE_LINEAR;
+
+ bo->priv = dri->image_extension->createImage(dri->device, width, height, dri_format,
+ dri_use, NULL);
+ if (!bo->priv) {
+ ret = -errno;
+ return ret;
+ }
+
+ ret = import_into_minigbm(dri, bo);
+ if (ret)
+ goto free_image;
+
+ if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_STRIDE, &stride)) {
+ ret = -errno;
+ goto free_image;
+ }
+
+ if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) {
+ ret = -errno;
+ goto free_image;
+ }
+
+ bo->strides[0] = stride;
+ bo->sizes[0] = stride * height;
+ bo->offsets[0] = offset;
+ bo->total_size = offset + bo->sizes[0];
+ return 0;
+
+free_image:
+ dri->image_extension->destroyImage(bo->priv);
+ return ret;
+}
+
+int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data)
+{
+ int ret;
+ struct dri_driver *dri = bo->drv->priv;
+
+ assert(bo->num_planes == 1);
+
+ // clang-format off
+ bo->priv = dri->image_extension->createImageFromFds(dri->device, data->width, data->height,
+ data->format, data->fds, bo->num_planes,
+ (int *)data->strides,
+ (int *)data->offsets, NULL);
+ // clang-format on
+ if (!bo->priv)
+ return -errno;
+
+ ret = import_into_minigbm(dri, bo);
+ if (ret) {
+ dri->image_extension->destroyImage(bo->priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dri_bo_destroy(struct bo *bo)
+{
+ struct dri_driver *dri = bo->drv->priv;
+
+ assert(bo->priv);
+ dri->image_extension->destroyImage(bo->priv);
+ bo->priv = NULL;
+ return 0;
+}
+
+/*
+ * Map an image plane.
+ *
+ * This relies on the underlying driver to do a decompressing and/or de-tiling
+ * blit if necessary,
+ *
+ * This function itself is not thread-safe; we rely on the fact that the caller
+ * locks a per-driver mutex.
+ */
+void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+{
+ struct dri_driver *dri = bo->drv->priv;
+
+ /* GBM flags and DRI flags are the same. */
+ vma->addr =
+ dri->image_extension->mapImage(dri->context, bo->priv, 0, 0, bo->width, bo->height,
+ map_flags, (int *)&vma->map_strides[plane], &vma->priv);
+ if (!vma->addr)
+ return MAP_FAILED;
+
+ return vma->addr;
+}
+
+int dri_bo_unmap(struct bo *bo, struct vma *vma)
+{
+ struct dri_driver *dri = bo->drv->priv;
+
+ assert(vma->priv);
+ dri->image_extension->unmapImage(dri->context, bo->priv, vma->priv);
+
+ /*
+ * From gbm_dri.c in Mesa:
+ *
+ * "Not all DRI drivers use direct maps. They may queue up DMA operations
+ * on the mapping context. Since there is no explicit gbm flush mechanism,
+ * we need to flush here."
+ */
+
+ dri->flush_extension->flush_with_flags(dri->context, NULL, __DRI2_FLUSH_CONTEXT, 0);
+ return 0;
+}
+
+#endif
diff --git a/chromium/third_party/minigbm/src/dri.h b/chromium/third_party/minigbm/src/dri.h
new file mode 100644
index 00000000000..d01bc5d5457
--- /dev/null
+++ b/chromium/third_party/minigbm/src/dri.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Advanced Micro Devices. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_AMDGPU
+
+typedef int GLint;
+typedef unsigned int GLuint;
+typedef unsigned char GLboolean;
+
+#include "GL/internal/dri_interface.h"
+#include "drv.h"
+
+struct dri_driver {
+ void *driver_handle;
+ __DRIscreen *device;
+ __DRIcontext *context; /* Needed for map/unmap operations. */
+ const __DRIextension **extensions;
+ const __DRIcoreExtension *core_extension;
+ const __DRIdri2Extension *dri2_extension;
+ const __DRIimageExtension *image_extension;
+ const __DRI2flushExtension *flush_extension;
+ const __DRIconfig **configs;
+};
+
+int dri_init(struct driver *drv, const char *dri_so_path, const char *driver_suffix);
+void dri_close(struct driver *drv);
+int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags);
+int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data);
+int dri_bo_destroy(struct bo *bo);
+void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags);
+int dri_bo_unmap(struct bo *bo, struct vma *vma);
+
+#endif
diff --git a/chromium/third_party/minigbm/src/drv.c b/chromium/third_party/minigbm/src/drv.c
index d5484482102..bc1f782d009 100644
--- a/chromium/third_party/minigbm/src/drv.c
+++ b/chromium/third_party/minigbm/src/drv.c
@@ -16,6 +16,11 @@
#include <unistd.h>
#include <xf86drm.h>
+#ifdef __ANDROID__
+#include <cutils/log.h>
+#include <libgen.h>
+#endif
+
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
@@ -362,7 +367,7 @@ struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
seek_end = lseek(data->fds[plane], 0, SEEK_END);
if (seek_end == (off_t)(-1)) {
- fprintf(stderr, "drv: lseek() failed with %s\n", strerror(errno));
+ drv_log("lseek() failed with %s\n", strerror(errno));
goto destroy_bo;
}
@@ -373,7 +378,7 @@ struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
bo->sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];
if ((int64_t)bo->offsets[plane] + bo->sizes[plane] > seek_end) {
- fprintf(stderr, "drv: buffer size is too large.\n");
+ drv_log("buffer size is too large.\n");
goto destroy_bo;
}
@@ -435,6 +440,7 @@ void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags
}
mapping.vma = calloc(1, sizeof(*mapping.vma));
+ memcpy(mapping.vma->map_strides, bo->strides, sizeof(mapping.vma->map_strides));
addr = bo->drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
if (addr == MAP_FAILED) {
*map_data = NULL;
@@ -461,9 +467,7 @@ exact_match:
int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
{
uint32_t i;
- int ret = drv_bo_flush(bo, mapping);
- if (ret)
- return ret;
+ int ret = 0;
pthread_mutex_lock(&bo->drv->driver_lock);
@@ -502,7 +506,7 @@ int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
return ret;
}
-int drv_bo_flush(struct bo *bo, struct mapping *mapping)
+int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
{
int ret = 0;
@@ -514,6 +518,8 @@ int drv_bo_flush(struct bo *bo, struct mapping *mapping)
if (bo->drv->backend->bo_flush)
ret = bo->drv->backend->bo_flush(bo, mapping);
+ else
+ ret = drv_bo_unmap(bo, mapping);
return ret;
}
@@ -555,6 +561,10 @@ int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);
+ // Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
+ if (ret)
+ ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);
+
return (ret) ? ret : fd;
}
@@ -595,69 +605,6 @@ uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_fl
return format;
}
-size_t drv_num_planes_from_format(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_ABGR4444:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_AYUV:
- case DRM_FORMAT_BGR233:
- case DRM_FORMAT_BGR565:
- case DRM_FORMAT_BGR888:
- case DRM_FORMAT_BGRA1010102:
- case DRM_FORMAT_BGRA4444:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_BGRA8888:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_BGRX4444:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_BGRX8888:
- case DRM_FORMAT_C8:
- case DRM_FORMAT_GR88:
- case DRM_FORMAT_R8:
- case DRM_FORMAT_RG88:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_RGBA4444:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_RGBX4444:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_XBGR4444:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- return 1;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- return 2;
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YVU420_ANDROID:
- return 3;
- }
-
- fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format);
- return 0;
-}
-
uint32_t drv_num_buffers_per_bo(struct bo *bo)
{
uint32_t count = 0;
@@ -673,3 +620,19 @@ uint32_t drv_num_buffers_per_bo(struct bo *bo)
return count;
}
+
+void drv_log_prefix(const char *prefix, const char *file, int line, const char *format, ...)
+{
+ char buf[50];
+ snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);
+
+ va_list args;
+ va_start(args, format);
+#ifdef __ANDROID__
+ __android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
+#else
+ fprintf(stderr, "%s ", buf);
+ vfprintf(stderr, format, args);
+#endif
+ va_end(args);
+}
diff --git a/chromium/third_party/minigbm/src/drv.h b/chromium/third_party/minigbm/src/drv.h
index 18653e5b93a..d27e8459098 100644
--- a/chromium/third_party/minigbm/src/drv.h
+++ b/chromium/third_party/minigbm/src/drv.h
@@ -37,6 +37,8 @@ extern "C" {
#define BO_USE_CAMERA_READ (1ull << 14)
#define BO_USE_RENDERSCRIPT (1ull << 16)
#define BO_USE_TEXTURE (1ull << 17)
+#define BO_USE_HW_VIDEO_DECODER (1ull << 18)
+
/* Map flags */
#define BO_MAP_NONE 0
@@ -84,6 +86,7 @@ struct vma {
uint32_t handle;
uint32_t map_flags;
int32_t refcount;
+ uint32_t map_strides[DRV_MAX_PLANES];
void *priv;
};
@@ -130,7 +133,7 @@ int drv_bo_unmap(struct bo *bo, struct mapping *mapping);
int drv_bo_invalidate(struct bo *bo, struct mapping *mapping);
-int drv_bo_flush(struct bo *bo, struct mapping *mapping);
+int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping);
uint32_t drv_bo_get_width(struct bo *bo);
@@ -154,7 +157,7 @@ uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane);
uint32_t drv_bo_get_format(struct bo *bo);
-uint32_t drv_bo_get_stride_in_pixels(struct bo *bo);
+uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane);
uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane);
@@ -164,6 +167,14 @@ size_t drv_num_planes_from_format(uint32_t format);
uint32_t drv_num_buffers_per_bo(struct bo *bo);
+#define drv_log(format, ...) \
+ do { \
+ drv_log_prefix("minigbm", __FILE__, __LINE__, format, ##__VA_ARGS__); \
+ } while (0)
+
+__attribute__((format(printf, 4, 5))) void drv_log_prefix(const char *prefix, const char *file,
+ int line, const char *format, ...);
+
#ifdef __cplusplus
}
#endif
diff --git a/chromium/third_party/minigbm/src/drv_priv.h b/chromium/third_party/minigbm/src/drv_priv.h
index 21c003bae31..719cd356818 100644
--- a/chromium/third_party/minigbm/src/drv_priv.h
+++ b/chromium/third_party/minigbm/src/drv_priv.h
@@ -92,7 +92,11 @@ struct backend {
#define BO_USE_SW_RARELY BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY
-#define LINEAR_METADATA (struct format_metadata) { 0, 1, DRM_FORMAT_MOD_LINEAR }
+#ifndef DRM_FORMAT_MOD_LINEAR
+#define DRM_FORMAT_MOD_LINEAR DRM_FORMAT_MOD_NONE
+#endif
+
+#define LINEAR_METADATA (struct format_metadata) { 1, 0, DRM_FORMAT_MOD_LINEAR }
// clang-format on
#endif
diff --git a/chromium/third_party/minigbm/src/exynos.c b/chromium/third_party/minigbm/src/exynos.c
index 526603e9b05..cf95b38ea10 100644
--- a/chromium/third_party/minigbm/src/exynos.c
+++ b/chromium/third_party/minigbm/src/exynos.c
@@ -56,7 +56,7 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint
bo->total_size = bo->sizes[0] = height * bo->strides[0];
bo->offsets[0] = 0;
} else {
- fprintf(stderr, "drv: unsupported format %X\n", format);
+ drv_log("unsupported format %X\n", format);
assert(0);
return -EINVAL;
}
@@ -72,8 +72,7 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_EXYNOS_GEM_CREATE failed (size=%zu)\n",
- size);
+ drv_log("DRM_IOCTL_EXYNOS_GEM_CREATE failed (size=%zu)\n", size);
goto cleanup_planes;
}
@@ -89,7 +88,7 @@ cleanup_planes:
gem_close.handle = bo->handles[plane - 1].u32;
int gem_close_ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
if (gem_close_ret) {
- fprintf(stderr, "drv: DRM_IOCTL_GEM_CLOSE failed: %d\n", gem_close_ret);
+ drv_log("DRM_IOCTL_GEM_CLOSE failed: %d\n", gem_close_ret);
}
}
diff --git a/chromium/third_party/minigbm/src/gbm.c b/chromium/third_party/minigbm/src/gbm.c
index 25b4fa0bc68..c12c26915e1 100644
--- a/chromium/third_party/minigbm/src/gbm.c
+++ b/chromium/third_party/minigbm/src/gbm.c
@@ -233,7 +233,6 @@ PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t widt
if (!bo || width == 0 || height == 0 || !stride || !map_data)
return NULL;
- *stride = gbm_bo_get_plane_stride(bo, plane);
map_flags = (transfer_flags & GBM_BO_TRANSFER_READ) ? BO_MAP_READ : BO_MAP_NONE;
map_flags |= (transfer_flags & GBM_BO_TRANSFER_WRITE) ? BO_MAP_WRITE : BO_MAP_NONE;
@@ -241,7 +240,9 @@ PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t widt
if (addr == MAP_FAILED)
return MAP_FAILED;
- offset = gbm_bo_get_plane_stride(bo, plane) * rect.y;
+ *stride = ((struct mapping *)*map_data)->vma->map_strides[plane];
+
+ offset = *stride * rect.y;
offset += drv_stride_from_format(bo->gbm_format, rect.x, plane);
return (void *)((uint8_t *)addr + offset);
}
@@ -249,7 +250,7 @@ PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t widt
PUBLIC void gbm_bo_unmap(struct gbm_bo *bo, void *map_data)
{
assert(bo);
- drv_bo_flush(bo->bo, map_data);
+ drv_bo_flush_or_unmap(bo->bo, map_data);
}
PUBLIC uint32_t gbm_bo_get_width(struct gbm_bo *bo)
diff --git a/chromium/third_party/minigbm/src/gbm.h b/chromium/third_party/minigbm/src/gbm.h
index da993c2a16e..ce05ce37003 100644
--- a/chromium/third_party/minigbm/src/gbm.h
+++ b/chromium/third_party/minigbm/src/gbm.h
@@ -265,6 +265,10 @@ enum gbm_bo_flags {
GBM_BO_USE_SW_READ_RARELY = (1 << 10),
GBM_BO_USE_SW_WRITE_OFTEN = (1 << 11),
GBM_BO_USE_SW_WRITE_RARELY = (1 << 12),
+ /**
+ * The buffer will be written by a video decode accelerator.
+ */
+ GBM_BO_USE_HW_VIDEO_DECODER = (1 << 13),
};
int
diff --git a/chromium/third_party/minigbm/src/gbm_helpers.c b/chromium/third_party/minigbm/src/gbm_helpers.c
index 26836696e42..81d1680e91f 100644
--- a/chromium/third_party/minigbm/src/gbm_helpers.c
+++ b/chromium/third_party/minigbm/src/gbm_helpers.c
@@ -40,6 +40,8 @@ uint64_t gbm_convert_usage(uint32_t usage)
use_flags |= BO_USE_SW_WRITE_OFTEN;
if (usage & GBM_BO_USE_SW_WRITE_RARELY)
use_flags |= BO_USE_SW_WRITE_RARELY;
+ if (usage & GBM_BO_USE_HW_VIDEO_DECODER)
+ use_flags |= BO_USE_HW_VIDEO_DECODER;
return use_flags;
}
diff --git a/chromium/third_party/minigbm/src/helpers.c b/chromium/third_party/minigbm/src/helpers.c
index 6c26e54795d..eaaa9370641 100644
--- a/chromium/third_party/minigbm/src/helpers.c
+++ b/chromium/third_party/minigbm/src/helpers.c
@@ -18,37 +18,75 @@
#include "helpers.h"
#include "util.h"
-static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
-{
-
- if (plane != 0) {
- switch (format) {
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YVU420_ANDROID:
- stride = DIV_ROUND_UP(stride, 2);
- break;
- }
- }
-
- return stride;
-}
-
-static uint32_t bpp_from_format(uint32_t format, size_t plane)
+struct planar_layout {
+ size_t num_planes;
+ int horizontal_subsampling[DRV_MAX_PLANES];
+ int vertical_subsampling[DRV_MAX_PLANES];
+ int bytes_per_pixel[DRV_MAX_PLANES];
+};
+
+// clang-format off
+
+static const struct planar_layout packed_1bpp_layout = {
+ .num_planes = 1,
+ .horizontal_subsampling = { 1 },
+ .vertical_subsampling = { 1 },
+ .bytes_per_pixel = { 1 }
+};
+
+static const struct planar_layout packed_2bpp_layout = {
+ .num_planes = 1,
+ .horizontal_subsampling = { 1 },
+ .vertical_subsampling = { 1 },
+ .bytes_per_pixel = { 2 }
+};
+
+static const struct planar_layout packed_3bpp_layout = {
+ .num_planes = 1,
+ .horizontal_subsampling = { 1 },
+ .vertical_subsampling = { 1 },
+ .bytes_per_pixel = { 3 }
+};
+
+static const struct planar_layout packed_4bpp_layout = {
+ .num_planes = 1,
+ .horizontal_subsampling = { 1 },
+ .vertical_subsampling = { 1 },
+ .bytes_per_pixel = { 4 }
+};
+
+static const struct planar_layout biplanar_yuv_420_layout = {
+ .num_planes = 2,
+ .horizontal_subsampling = { 1, 2 },
+ .vertical_subsampling = { 1, 2 },
+ .bytes_per_pixel = { 1, 2 }
+};
+
+static const struct planar_layout triplanar_yuv_420_layout = {
+ .num_planes = 3,
+ .horizontal_subsampling = { 1, 2, 2 },
+ .vertical_subsampling = { 1, 2, 2 },
+ .bytes_per_pixel = { 1, 1, 1 }
+};
+
+// clang-format on
+
+static const struct planar_layout *layout_from_format(uint32_t format)
{
- assert(plane < drv_num_planes_from_format(format));
-
switch (format) {
case DRM_FORMAT_BGR233:
case DRM_FORMAT_C8:
case DRM_FORMAT_R8:
case DRM_FORMAT_RGB332:
+ return &packed_1bpp_layout;
+
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YVU420_ANDROID:
- return 8;
+ return &triplanar_yuv_420_layout;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
- return (plane == 0) ? 8 : 4;
+ return &biplanar_yuv_420_layout;
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_ABGR4444:
@@ -74,11 +112,11 @@ static uint32_t bpp_from_format(uint32_t format, size_t plane)
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
- return 16;
+ return &packed_2bpp_layout;
case DRM_FORMAT_BGR888:
case DRM_FORMAT_RGB888:
- return 24;
+ return &packed_3bpp_layout;
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_ABGR8888:
@@ -97,17 +135,44 @@ static uint32_t bpp_from_format(uint32_t format, size_t plane)
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XRGB8888:
- return 32;
+ return &packed_4bpp_layout;
+
+ default:
+ drv_log("UNKNOWN FORMAT %d\n", format);
+ return NULL;
}
+}
- fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format);
- return 0;
+size_t drv_num_planes_from_format(uint32_t format)
+{
+ const struct planar_layout *layout = layout_from_format(format);
+
+ /*
+ * drv_bo_new calls this function early to query number of planes and
+ * considers 0 planes to mean unknown format, so we have to support
+ * that. All other layout_from_format() queries can assume that the
+ * format is supported and that the return value is non-NULL.
+ */
+
+ return layout ? layout->num_planes : 0;
}
-uint32_t drv_bo_get_stride_in_pixels(struct bo *bo)
+uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
{
- uint32_t bytes_per_pixel = DIV_ROUND_UP(bpp_from_format(bo->format, 0), 8);
- return DIV_ROUND_UP(bo->strides[0], bytes_per_pixel);
+ const struct planar_layout *layout = layout_from_format(format);
+
+ assert(plane < layout->num_planes);
+
+ return DIV_ROUND_UP(height, layout->vertical_subsampling[plane]);
+}
+
+uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane)
+{
+ const struct planar_layout *layout = layout_from_format(format);
+
+ assert(plane < layout->num_planes);
+
+ return layout->bytes_per_pixel[plane];
}
/*
@@ -115,7 +180,12 @@ uint32_t drv_bo_get_stride_in_pixels(struct bo *bo)
*/
uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
{
- uint32_t stride = DIV_ROUND_UP(width * bpp_from_format(format, plane), 8);
+ const struct planar_layout *layout = layout_from_format(format);
+ assert(plane < layout->num_planes);
+
+ uint32_t plane_width =
+ DIV_ROUND_UP(width, layout->horizontal_subsampling[plane]);
+ uint32_t stride = plane_width * layout->bytes_per_pixel[plane];
/*
* The stride of Android YV12 buffers is required to be aligned to 16 bytes
@@ -129,20 +199,21 @@ uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
{
- assert(plane < drv_num_planes_from_format(format));
- uint32_t vertical_subsampling;
+ return stride * drv_height_from_format(format, height, plane);
+}
- switch (format) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YVU420_ANDROID:
- vertical_subsampling = (plane == 0) ? 1 : 2;
- break;
- default:
- vertical_subsampling = 1;
+static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
+{
+ if (plane != 0) {
+ switch (format) {
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU420_ANDROID:
+ stride = DIV_ROUND_UP(stride, 2);
+ break;
+ }
}
- return stride * DIV_ROUND_UP(height, vertical_subsampling);
+ return stride;
}
/*
@@ -206,12 +277,12 @@ int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t
memset(&create_dumb, 0, sizeof(create_dumb));
create_dumb.height = aligned_height;
create_dumb.width = aligned_width;
- create_dumb.bpp = bpp_from_format(format, 0);
+ create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8;
create_dumb.flags = 0;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MODE_CREATE_DUMB failed\n");
+ drv_log("DRM_IOCTL_MODE_CREATE_DUMB failed (%d, %d)\n", bo->drv->fd, errno);
return ret;
}
@@ -234,8 +305,7 @@ int drv_dumb_bo_destroy(struct bo *bo)
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n",
- bo->handles[0].u32);
+ drv_log("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32);
return ret;
}
@@ -261,7 +331,7 @@ int drv_gem_bo_destroy(struct bo *bo)
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
+ drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
bo->handles[plane].u32, ret);
error = ret;
}
@@ -283,8 +353,7 @@ int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n",
- prime_handle.fd);
+ drv_log("DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", prime_handle.fd);
/*
* Need to call GEM close on planes that were opened,
@@ -320,7 +389,7 @@ void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MODE_MAP_DUMB failed \n");
+ drv_log("DRM_IOCTL_MODE_MAP_DUMB failed\n");
return MAP_FAILED;
}
@@ -361,7 +430,7 @@ int drv_mapping_destroy(struct bo *bo)
if (!--mapping->vma->refcount) {
ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
if (ret) {
- fprintf(stderr, "drv: munmap failed");
+ drv_log("munmap failed\n");
return ret;
}
diff --git a/chromium/third_party/minigbm/src/helpers.h b/chromium/third_party/minigbm/src/helpers.h
index 6b8818d2d37..4c649c2ceb1 100644
--- a/chromium/third_party/minigbm/src/helpers.h
+++ b/chromium/third_party/minigbm/src/helpers.h
@@ -10,6 +10,7 @@
#include "drv.h"
#include "helpers_array.h"
+uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane);
uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane);
int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format);
int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
diff --git a/chromium/third_party/minigbm/src/i915.c b/chromium/third_party/minigbm/src/i915.c
index da22dc5e832..6df6dccdf43 100644
--- a/chromium/third_party/minigbm/src/i915.c
+++ b/chromium/third_party/minigbm/src/i915.c
@@ -6,8 +6,10 @@
#ifdef DRV_I915
+#include <assert.h>
#include <errno.h>
#include <i915_drm.h>
+#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
@@ -49,6 +51,31 @@ static uint32_t i915_get_gen(int device_id)
return 4;
}
+/*
+ * We allow allocation of ARGB formats for SCANOUT if the corresponding XRGB
+ * formats supports it. It's up to the caller (chrome ozone) to ultimately not
+ * scan out ARGB if the display controller only supports XRGB, but we'll allow
+ * the allocation of the bo here.
+ */
+static bool format_compatible(const struct combination *combo, uint32_t format)
+{
+ if (combo->format == format)
+ return true;
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ return combo->format == DRM_FORMAT_ARGB8888;
+ case DRM_FORMAT_XBGR8888:
+ return combo->format == DRM_FORMAT_ABGR8888;
+ case DRM_FORMAT_RGBX8888:
+ return combo->format == DRM_FORMAT_RGBA8888;
+ case DRM_FORMAT_BGRX8888:
+ return combo->format == DRM_FORMAT_BGRA8888;
+ default:
+ return false;
+ }
+}
+
static int i915_add_kms_item(struct driver *drv, const struct kms_item *item)
{
uint32_t i;
@@ -60,7 +87,7 @@ static int i915_add_kms_item(struct driver *drv, const struct kms_item *item)
*/
for (i = 0; i < drv_array_size(drv->combos); i++) {
combo = (struct combination *)drv_array_at_idx(drv->combos, i);
- if (combo->format != item->format)
+ if (!format_compatible(combo, item->format))
continue;
if (item->modifier == DRM_FORMAT_MOD_LINEAR &&
@@ -74,6 +101,10 @@ static int i915_add_kms_item(struct driver *drv, const struct kms_item *item)
combo->use_flags |= item->use_flags & ~BO_USE_CURSOR;
}
+ /* If we can scanout NV12, we support all tiling modes. */
+ if (item->format == DRM_FORMAT_NV12)
+ combo->use_flags |= item->use_flags;
+
if (combo->metadata.modifier == item->modifier)
combo->use_flags |= item->use_flags;
}
@@ -151,6 +182,11 @@ static int i915_add_combinations(struct driver *drv)
ARRAY_SIZE(tileable_texture_source_formats), &metadata,
texture_use_flags);
+ /* Support y-tiled NV12 for libva */
+ const uint32_t nv12_format = DRM_FORMAT_NV12;
+ drv_add_combinations(drv, &nv12_format, 1, &metadata,
+ BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER);
+
kms_items = drv_query_kms(drv);
if (!kms_items)
return 0;
@@ -171,13 +207,21 @@ static int i915_align_dimensions(struct bo *bo, uint32_t tiling, uint32_t *strid
uint32_t *aligned_height)
{
struct i915_device *i915 = bo->drv->priv;
- uint32_t horizontal_alignment = 4;
- uint32_t vertical_alignment = 4;
+ uint32_t horizontal_alignment;
+ uint32_t vertical_alignment;
switch (tiling) {
default:
case I915_TILING_NONE:
+ /*
+ * The Intel GPU doesn't need any alignment in linear mode,
+ * but libva requires the allocation stride to be aligned to
+ * 16 bytes and height to 4 rows. Further, we round up the
+ * horizontal alignment so that row start on a cache line (64
+ * bytes).
+ */
horizontal_alignment = 64;
+ vertical_alignment = 4;
break;
case I915_TILING_X:
@@ -196,21 +240,6 @@ static int i915_align_dimensions(struct bo *bo, uint32_t tiling, uint32_t *strid
break;
}
- /*
- * The alignment calculated above is based on the full size luma plane and to have chroma
- * planes properly aligned with subsampled formats, we need to multiply luma alignment by
- * subsampling factor.
- */
- switch (bo->format) {
- case DRM_FORMAT_YVU420_ANDROID:
- case DRM_FORMAT_YVU420:
- horizontal_alignment *= 2;
- /* Fall through */
- case DRM_FORMAT_NV12:
- vertical_alignment *= 2;
- break;
- }
-
*aligned_height = ALIGN(bo->height, vertical_alignment);
if (i915->gen > 3) {
*stride = ALIGN(*stride, horizontal_alignment);
@@ -255,7 +284,7 @@ static int i915_init(struct driver *drv)
get_param.value = &device_id;
ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
if (ret) {
- fprintf(stderr, "drv: Failed to get I915_PARAM_CHIPSET_ID\n");
+ drv_log("Failed to get I915_PARAM_CHIPSET_ID\n");
free(i915);
return -EINVAL;
}
@@ -267,7 +296,7 @@ static int i915_init(struct driver *drv)
get_param.value = &i915->has_llc;
ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
if (ret) {
- fprintf(stderr, "drv: Failed to get I915_PARAM_HAS_LLC\n");
+ drv_log("Failed to get I915_PARAM_HAS_LLC\n");
free(i915);
return -EINVAL;
}
@@ -277,12 +306,40 @@ static int i915_init(struct driver *drv)
return i915_add_combinations(drv);
}
+static int i915_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, uint32_t format)
+{
+ uint32_t offset;
+ size_t plane;
+ int ret;
+
+ offset = 0;
+ for (plane = 0; plane < drv_num_planes_from_format(format); plane++) {
+ uint32_t stride = drv_stride_from_format(format, width, plane);
+ uint32_t plane_height = drv_height_from_format(format, height, plane);
+
+ if (bo->tiling != I915_TILING_NONE)
+ assert(IS_ALIGNED(offset, 4096));
+
+ ret = i915_align_dimensions(bo, bo->tiling, &stride, &plane_height);
+ if (ret)
+ return ret;
+
+ bo->strides[plane] = stride;
+ bo->sizes[plane] = stride * plane_height;
+ bo->offsets[plane] = offset;
+ offset += bo->sizes[plane];
+ }
+
+ bo->total_size = offset;
+
+ return 0;
+}
+
static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height,
uint32_t format, uint64_t modifier)
{
int ret;
size_t plane;
- uint32_t stride;
struct drm_i915_gem_create gem_create;
struct drm_i915_gem_set_tiling gem_set_tiling;
@@ -300,57 +357,27 @@ static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t h
bo->format_modifiers[0] = modifier;
- stride = drv_stride_from_format(format, width, 0);
-
- ret = i915_align_dimensions(bo, bo->tiling, &stride, &height);
- if (ret)
- return ret;
-
- /*
- * HAL_PIXEL_FORMAT_YV12 requires the buffer height not be aligned, but we need to keep
- * total size as with aligned height to ensure enough padding space after each plane to
- * satisfy GPU alignment requirements.
- *
- * We do it by first calling drv_bo_from_format() with aligned height and
- * DRM_FORMAT_YVU420, which allows height alignment, saving the total size it calculates
- * and then calling it again with requested parameters.
- *
- * This relies on the fact that i965 driver uses separate surfaces for each plane and
- * contents of padding bytes is not affected, as it is only used to satisfy GPU cache
- * requests.
- *
- * This is enforced by Mesa in src/intel/isl/isl_gen8.c, inside
- * isl_gen8_choose_image_alignment_el(), which is used for GEN9 and GEN8.
- */
if (format == DRM_FORMAT_YVU420_ANDROID) {
- uint32_t unaligned_height = bo->height;
- size_t total_size;
-
- drv_bo_from_format(bo, stride, height, DRM_FORMAT_YVU420);
- total_size = bo->total_size;
- drv_bo_from_format(bo, stride, unaligned_height, format);
- bo->total_size = total_size;
- } else {
+ /*
+ * We only need to be able to use this as a linear texture,
+ * which doesn't put any HW restrictions on how we lay it
+ * out. The Android format does require the stride to be a
+ * multiple of 16 and expects the Cr and Cb stride to be
+ * ALIGN(Y_stride / 2, 16), which we can make happen by
+ * aligning to 32 bytes here.
+ */
+ uint32_t stride = ALIGN(width, 32);
drv_bo_from_format(bo, stride, height, format);
+ } else {
+ i915_bo_from_format(bo, width, height, format);
}
- /*
- * Quoting Mesa ISL library:
- *
- * - For linear surfaces, additional padding of 64 bytes is required at
- * the bottom of the surface. This is in addition to the padding
- * required above.
- */
- if (bo->tiling == I915_TILING_NONE)
- bo->total_size += 64;
-
memset(&gem_create, 0, sizeof(gem_create));
gem_create.size = bo->total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n",
- gem_create.size);
+ drv_log("DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n", gem_create.size);
return ret;
}
@@ -369,7 +396,7 @@ static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t h
gem_close.handle = bo->handles[0].u32;
drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_SET_TILING failed with %d", errno);
+ drv_log("DRM_IOCTL_I915_GEM_SET_TILING failed with %d\n", errno);
return -errno;
}
@@ -425,7 +452,7 @@ static int i915_bo_import(struct bo *bo, struct drv_import_fd_data *data)
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_GET_TILING, &gem_get_tiling);
if (ret) {
drv_gem_bo_destroy(bo);
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_GET_TILING failed.");
+ drv_log("DRM_IOCTL_I915_GEM_GET_TILING failed.\n");
return ret;
}
@@ -451,7 +478,7 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_MMAP failed\n");
+ drv_log("DRM_IOCTL_I915_GEM_MMAP failed\n");
return MAP_FAILED;
}
@@ -464,7 +491,7 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_MMAP_GTT failed\n");
+ drv_log("DRM_IOCTL_I915_GEM_MMAP_GTT failed\n");
return MAP_FAILED;
}
@@ -473,7 +500,7 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
}
if (addr == MAP_FAILED) {
- fprintf(stderr, "drv: i915 GEM mmap failed\n");
+ drv_log("i915 GEM mmap failed\n");
return addr;
}
@@ -500,7 +527,7 @@ static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping)
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_SET_DOMAIN with %d\n", ret);
+ drv_log("DRM_IOCTL_I915_GEM_SET_DOMAIN with %d\n", ret);
return ret;
}
diff --git a/chromium/third_party/minigbm/src/mediatek.c b/chromium/third_party/minigbm/src/mediatek.c
index 761400486cd..cfb60b36626 100644
--- a/chromium/third_party/minigbm/src/mediatek.c
+++ b/chromium/third_party/minigbm/src/mediatek.c
@@ -62,8 +62,7 @@ static int mediatek_bo_create(struct bo *bo, uint32_t width, uint32_t height, ui
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MTK_GEM_CREATE failed (size=%llu)\n",
- gem_create.size);
+ drv_log("DRM_IOCTL_MTK_GEM_CREATE failed (size=%llu)\n", gem_create.size);
return ret;
}
@@ -84,7 +83,7 @@ static void *mediatek_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint3
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_MAP_OFFSET, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MTK_GEM_MAP_OFFSET failed\n");
+ drv_log("DRM_IOCTL_MTK_GEM_MAP_OFFSET failed\n");
return MAP_FAILED;
}
diff --git a/chromium/third_party/minigbm/src/msm.c b/chromium/third_party/minigbm/src/msm.c
index 6e18789865a..fe09de0bb12 100644
--- a/chromium/third_party/minigbm/src/msm.c
+++ b/chromium/third_party/minigbm/src/msm.c
@@ -24,7 +24,7 @@ static int msm_init(struct driver *drv)
}
static int msm_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t flags)
+ uint64_t flags)
{
width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
diff --git a/chromium/third_party/minigbm/src/rockchip.c b/chromium/third_party/minigbm/src/rockchip.c
index ac17dbda107..a0d91412393 100644
--- a/chromium/third_party/minigbm/src/rockchip.c
+++ b/chromium/third_party/minigbm/src/rockchip.c
@@ -186,7 +186,7 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
} else {
if (!has_modifier(modifiers, count, DRM_FORMAT_MOD_LINEAR)) {
errno = EINVAL;
- fprintf(stderr, "no usable modifier found\n");
+ drv_log("no usable modifier found\n");
return -1;
}
@@ -212,8 +212,7 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_ROCKCHIP_GEM_CREATE failed (size=%llu)\n",
- gem_create.size);
+ drv_log("DRM_IOCTL_ROCKCHIP_GEM_CREATE failed (size=%llu)\n", gem_create.size);
return ret;
}
@@ -247,7 +246,7 @@ static void *rockchip_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint3
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET failed\n");
+ drv_log("DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET failed\n");
return MAP_FAILED;
}
diff --git a/chromium/third_party/minigbm/src/tegra.c b/chromium/third_party/minigbm/src/tegra.c
index f0651d79f0f..fb2f6a9cb54 100644
--- a/chromium/third_party/minigbm/src/tegra.c
+++ b/chromium/third_party/minigbm/src/tegra.c
@@ -228,7 +228,7 @@ static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint3
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%zu)\n", size);
+ drv_log("DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%zu)\n", size);
return ret;
}
@@ -286,7 +286,7 @@ static int tegra_bo_import(struct bo *bo, struct drv_import_fd_data *data)
} else if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_BLOCK) {
bo->tiling = NV_MEM_KIND_C32_2CRA;
} else {
- fprintf(stderr, "tegra_bo_import: unknown tile format %d", gem_get_tiling.mode);
+ drv_log("%s: unknown tile format %d\n", __func__, gem_get_tiling.mode);
drv_gem_bo_destroy(bo);
assert(0);
}
@@ -306,7 +306,7 @@ static void *tegra_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_MMAP, &gem_map, sizeof(gem_map));
if (ret < 0) {
- fprintf(stderr, "drv: DRM_TEGRA_GEM_MMAP failed\n");
+ drv_log("DRM_TEGRA_GEM_MMAP failed\n");
return MAP_FAILED;
}
diff --git a/chromium/third_party/minigbm/src/util.h b/chromium/third_party/minigbm/src/util.h
index fd61d9b4b61..e4e13991473 100644
--- a/chromium/third_party/minigbm/src/util.h
+++ b/chromium/third_party/minigbm/src/util.h
@@ -10,7 +10,8 @@
#define MAX(A, B) ((A) > (B) ? (A) : (B))
#define ARRAY_SIZE(A) (sizeof(A) / sizeof(*(A)))
#define PUBLIC __attribute__((visibility("default")))
-#define ALIGN(A, B) (((A) + (B)-1) / (B) * (B))
+#define ALIGN(A, B) (((A) + (B)-1) & ~((B)-1))
+#define IS_ALIGNED(A, B) (ALIGN((A), (B)) == (A))
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#endif
diff --git a/chromium/third_party/minigbm/src/vc4.c b/chromium/third_party/minigbm/src/vc4.c
index 79602474ae0..71e73ea5ba0 100644
--- a/chromium/third_party/minigbm/src/vc4.c
+++ b/chromium/third_party/minigbm/src/vc4.c
@@ -48,8 +48,7 @@ static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VC4_CREATE_BO, &bo_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n",
- bo->total_size);
+ drv_log("DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n", bo->total_size);
return ret;
}
@@ -69,12 +68,12 @@ static void *vc4_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t m
ret = drmCommandWriteRead(bo->drv->fd, DRM_VC4_MMAP_BO, &bo_map, sizeof(bo_map));
if (ret) {
- fprintf(stderr, "drv: DRM_VC4_MMAP_BO failed\n");
+ drv_log("DRM_VC4_MMAP_BO failed\n");
return MAP_FAILED;
}
vma->length = bo->total_size;
- return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ return mmap(NULL, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
bo_map.offset);
}
diff --git a/chromium/third_party/minigbm/src/virtio_dumb.c b/chromium/third_party/minigbm/src/virtio_dumb.c
deleted file mode 100644
index b6dc3cbf9f4..00000000000
--- a/chromium/third_party/minigbm/src/virtio_dumb.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 2016 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef DRV_VIRGL
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-#define MESA_LLVMPIPE_TILE_ORDER 6
-#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB8888 };
-
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
- DRM_FORMAT_YVU420_ANDROID };
-
-static int virtio_gpu_init(struct driver *drv)
-{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
-
- drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
-
- return drv_modify_linear_combinations(drv);
-}
-
-static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
-{
- width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
- height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
-
- /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned. */
- if (bo->format == DRM_FORMAT_YVU420_ANDROID)
- height = bo->height;
-
- return drv_dumb_bo_create(bo, width, height, format, use_flags);
-}
-
-static uint32_t virtio_gpu_resolve_format(uint32_t format, uint64_t use_flags)
-{
- switch (format) {
- case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
- /*HACK: See b/28671744 */
- return DRM_FORMAT_XBGR8888;
- case DRM_FORMAT_FLEX_YCbCr_420_888:
- return DRM_FORMAT_YVU420;
- default:
- return format;
- }
-}
-
-const struct backend backend_virtio_gpu = {
- .name = "virtio_gpu",
- .init = virtio_gpu_init,
- .bo_create = virtio_gpu_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
- .bo_unmap = drv_bo_munmap,
- .resolve_format = virtio_gpu_resolve_format,
-};
-
-#endif
diff --git a/chromium/third_party/minigbm/src/virtio_virgl.c b/chromium/third_party/minigbm/src/virtio_gpu.c
index b33677bacda..5200b3d10b2 100644
--- a/chromium/third_party/minigbm/src/virtio_virgl.c
+++ b/chromium/third_party/minigbm/src/virtio_gpu.c
@@ -4,8 +4,6 @@
* found in the LICENSE file.
*/
-#ifdef DRV_VIRGL
-
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
@@ -19,15 +17,27 @@
#include "util.h"
#include "virgl_hw.h"
+#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
+#endif
#define PIPE_TEXTURE_2D 2
+#define MESA_LLVMPIPE_TILE_ORDER 6
+#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
+
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888 };
+static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU420_ANDROID };
+
static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_RG88 };
+struct virtio_gpu_priv {
+ int has_3d;
+};
+
static uint32_t translate_format(uint32_t drm_fourcc, uint32_t plane)
{
switch (drm_fourcc) {
@@ -50,19 +60,21 @@ static uint32_t translate_format(uint32_t drm_fourcc, uint32_t plane)
}
}
-static int virtio_gpu_init(struct driver *drv)
+static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
+ width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
+ height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
- drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+ /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned. */
+ if (bo->format == DRM_FORMAT_YVU420_ANDROID)
+ height = bo->height;
- return drv_modify_linear_combinations(drv);
+ return drv_dumb_bo_create(bo, width, height, format, use_flags);
}
-static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
int ret;
ssize_t plane;
@@ -98,7 +110,7 @@ static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height,
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n",
+ drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n",
strerror(errno));
goto fail;
}
@@ -125,7 +137,7 @@ fail:
return ret;
}
-static void *virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
struct drm_virtgpu_map gem_map;
@@ -135,18 +147,91 @@ static void *virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
+ drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
return MAP_FAILED;
}
+ vma->length = bo->total_size;
return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
}
+static int virtio_gpu_init(struct driver *drv)
+{
+ int ret;
+ struct virtio_gpu_priv *priv;
+ struct drm_virtgpu_getparam args;
+
+ priv = calloc(1, sizeof(*priv));
+ drv->priv = priv;
+
+ memset(&args, 0, sizeof(args));
+ args.param = VIRTGPU_PARAM_3D_FEATURES;
+ args.value = (uint64_t)(uintptr_t)&priv->has_3d;
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
+ if (ret) {
+ drv_log("virtio 3D acceleration is not available\n");
+ /* Be paranoid */
+ priv->has_3d = 0;
+ }
+
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
+
+ if (priv->has_3d)
+ drv_add_combinations(drv, texture_source_formats,
+ ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
+ BO_USE_TEXTURE_MASK);
+ else
+ drv_add_combinations(drv, dumb_texture_source_formats,
+ ARRAY_SIZE(dumb_texture_source_formats), &LINEAR_METADATA,
+ BO_USE_TEXTURE_MASK);
+
+ return drv_modify_linear_combinations(drv);
+}
+
+static void virtio_gpu_close(struct driver *drv)
+{
+ free(drv->priv);
+ drv->priv = NULL;
+}
+
+static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ if (priv->has_3d)
+ return virtio_virgl_bo_create(bo, width, height, format, use_flags);
+ else
+ return virtio_dumb_bo_create(bo, width, height, format, use_flags);
+}
+
+static int virtio_gpu_bo_destroy(struct bo *bo)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ if (priv->has_3d)
+ return drv_gem_bo_destroy(bo);
+ else
+ return drv_dumb_bo_destroy(bo);
+}
+
+static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ if (priv->has_3d)
+ return virtio_virgl_bo_map(bo, vma, plane, map_flags);
+ else
+ return drv_dumb_bo_map(bo, vma, plane, map_flags);
+}
+
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
int ret;
struct drm_virtgpu_3d_transfer_from_host xfer;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+
+ if (!priv->has_3d)
+ return 0;
memset(&xfer, 0, sizeof(xfer));
xfer.bo_handle = mapping->vma->handle;
@@ -158,8 +243,7 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",
- strerror(errno));
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
return ret;
}
@@ -170,6 +254,10 @@ static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
int ret;
struct drm_virtgpu_3d_transfer_to_host xfer;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+
+ if (!priv->has_3d)
+ return 0;
if (!(mapping->vma->map_flags & BO_MAP_WRITE))
return 0;
@@ -184,8 +272,7 @@ static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",
- strerror(errno));
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
return ret;
}
@@ -198,22 +285,23 @@ static uint32_t virtio_gpu_resolve_format(uint32_t format, uint64_t use_flags)
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
+ case DRM_FORMAT_FLEX_YCbCr_420_888:
+ return DRM_FORMAT_YVU420;
default:
return format;
}
}
-struct backend backend_virtio_gpu = {
+const struct backend backend_virtio_gpu = {
.name = "virtio_gpu",
.init = virtio_gpu_init,
+ .close = virtio_gpu_close,
.bo_create = virtio_gpu_bo_create,
- .bo_destroy = drv_gem_bo_destroy,
+ .bo_destroy = virtio_gpu_bo_destroy,
.bo_import = drv_prime_bo_import,
- .bo_map = virgl_bo_map,
+ .bo_map = virtio_gpu_bo_map,
.bo_unmap = drv_bo_munmap,
.bo_invalidate = virtio_gpu_bo_invalidate,
.bo_flush = virtio_gpu_bo_flush,
.resolve_format = virtio_gpu_resolve_format,
};
-
-#endif