summaryrefslogtreecommitdiff
path: root/chromium/third_party/minigbm
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2020-07-16 11:45:35 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2020-07-17 08:59:23 +0000
commit552906b0f222c5d5dd11b9fd73829d510980461a (patch)
tree3a11e6ed0538a81dd83b20cf3a4783e297f26d91 /chromium/third_party/minigbm
parent1b05827804eaf047779b597718c03e7d38344261 (diff)
downloadqtwebengine-chromium-552906b0f222c5d5dd11b9fd73829d510980461a.tar.gz
BASELINE: Update Chromium to 83.0.4103.122
Change-Id: Ie3a82f5bb0076eec2a7c6a6162326b4301ee291e Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/third_party/minigbm')
-rw-r--r--chromium/third_party/minigbm/BUILD.gn25
-rw-r--r--chromium/third_party/minigbm/src/Android.mk1
-rw-r--r--chromium/third_party/minigbm/src/OWNERS2
-rw-r--r--chromium/third_party/minigbm/src/amdgpu.c106
-rw-r--r--chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc19
-rw-r--r--chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h1
-rw-r--r--chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc29
-rw-r--r--chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h2
-rw-r--r--chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc6
-rw-r--r--chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc65
-rw-r--r--chromium/third_party/minigbm/src/dri.c241
-rw-r--r--chromium/third_party/minigbm/src/dri.h11
-rw-r--r--chromium/third_party/minigbm/src/drv.c187
-rw-r--r--chromium/third_party/minigbm/src/drv.h46
-rw-r--r--chromium/third_party/minigbm/src/drv_priv.h39
-rw-r--r--chromium/third_party/minigbm/src/exynos.c20
-rw-r--r--chromium/third_party/minigbm/src/gbm.c199
-rw-r--r--chromium/third_party/minigbm/src/gbm.h191
-rw-r--r--chromium/third_party/minigbm/src/gbm.pc2
-rw-r--r--chromium/third_party/minigbm/src/helpers.c216
-rw-r--r--chromium/third_party/minigbm/src/helpers.h6
-rw-r--r--chromium/third_party/minigbm/src/i915.c331
-rw-r--r--chromium/third_party/minigbm/src/mediatek.c81
-rw-r--r--chromium/third_party/minigbm/src/meson.c8
-rw-r--r--chromium/third_party/minigbm/src/msm.c76
-rwxr-xr-xchromium/third_party/minigbm/src/presubmit.sh1
-rw-r--r--chromium/third_party/minigbm/src/rockchip.c115
-rw-r--r--chromium/third_party/minigbm/src/synaptics.c33
-rw-r--r--chromium/third_party/minigbm/src/tegra.c38
-rw-r--r--chromium/third_party/minigbm/src/util.h2
-rw-r--r--chromium/third_party/minigbm/src/vc4.c50
-rw-r--r--chromium/third_party/minigbm/src/virgl_hw.h3
-rw-r--r--chromium/third_party/minigbm/src/virtgpu_drm.h188
-rw-r--r--chromium/third_party/minigbm/src/virtio_gpu.c350
34 files changed, 1727 insertions, 963 deletions
diff --git a/chromium/third_party/minigbm/BUILD.gn b/chromium/third_party/minigbm/BUILD.gn
index 0a100c8d341..8cc5f9d781d 100644
--- a/chromium/third_party/minigbm/BUILD.gn
+++ b/chromium/third_party/minigbm/BUILD.gn
@@ -2,16 +2,16 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/config/chromecast_build.gni")
import("//build/config/linux/pkg_config.gni")
assert(is_linux)
declare_args() {
- # Controls whether the build should use the version of minigbm library
- # shipped with the system. In release builds of Chrome OS we use the
- # system version, but when building on dev workstations or the Chrome
- # waterfall we bundle it because Ubuntu doesn't ship a usable version.
- use_system_minigbm = false
+ # Controls whether the build should use the version of minigbm library shipped
+ # with the system. In release builds of desktop Linux and Chrome OS we use the
+ # system version.
+ use_system_minigbm = is_desktop_linux && !is_chromecast
use_amdgpu_minigbm = false
use_exynos_minigbm = false
@@ -22,6 +22,7 @@ declare_args() {
use_msm_minigbm = false
use_radeon_minigbm = false
use_rockchip_minigbm = false
+ use_synaptics_minigbm = false
use_tegra_minigbm = false
use_vc4_minigbm = false
}
@@ -57,6 +58,9 @@ if (!use_system_minigbm) {
if (use_rockchip_minigbm) {
defines += [ "DRV_ROCKCHIP" ]
}
+ if (use_synaptics_minigbm) {
+ defines += [ "DRV_SYNAPTICS" ]
+ }
if (use_tegra_minigbm) {
defines += [ "DRV_TEGRA" ]
}
@@ -68,8 +72,8 @@ if (!use_system_minigbm) {
shared_library("minigbm") {
sources = [
"src/amdgpu.c",
- "src/drv.c",
"src/dri.c",
+ "src/drv.c",
"src/evdi.c",
"src/exynos.c",
"src/gbm.c",
@@ -84,6 +88,7 @@ if (!use_system_minigbm) {
"src/nouveau.c",
"src/radeon.c",
"src/rockchip.c",
+ "src/synaptics.c",
"src/tegra.c",
"src/udl.c",
"src/vc4.c",
@@ -94,9 +99,7 @@ if (!use_system_minigbm) {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
- deps = [
- "//build/config/linux/libdrm",
- ]
+ deps = [ "//build/config/linux/libdrm" ]
public_configs = [ ":minigbm_config" ]
if (use_exynos_minigbm) {
@@ -111,9 +114,7 @@ if (!use_system_minigbm) {
# This target is used for Chromecast build, which expects the resulting lib
# to have a name <GN target name> + .so
group("libminigbm") {
- deps = [
- ":minigbm",
- ]
+ deps = [ ":minigbm" ]
}
}
diff --git a/chromium/third_party/minigbm/src/Android.mk b/chromium/third_party/minigbm/src/Android.mk
index 564f416f691..3eab7aeb8c0 100644
--- a/chromium/third_party/minigbm/src/Android.mk
+++ b/chromium/third_party/minigbm/src/Android.mk
@@ -23,6 +23,7 @@ MINIGBM_SRC := \
nouveau.c \
radeon.c \
rockchip.c \
+ synaptics.c \
tegra.c \
udl.c \
vc4.c \
diff --git a/chromium/third_party/minigbm/src/OWNERS b/chromium/third_party/minigbm/src/OWNERS
index a6feb041243..f4aa651e992 100644
--- a/chromium/third_party/minigbm/src/OWNERS
+++ b/chromium/third_party/minigbm/src/OWNERS
@@ -6,3 +6,5 @@ ddavenport@chromium.org
dbehr@chromium.org
dcastagna@chromium.org
lepton@chromium.org
+tutankhamen@chromium.org
+stevensd@chromium.org
diff --git a/chromium/third_party/minigbm/src/amdgpu.c b/chromium/third_party/minigbm/src/amdgpu.c
index 65dd8644f45..795d1379060 100644
--- a/chromium/third_party/minigbm/src/amdgpu.c
+++ b/chromium/third_party/minigbm/src/amdgpu.c
@@ -18,11 +18,9 @@
#include "helpers.h"
#include "util.h"
-#ifdef __ANDROID__
-#define DRI_PATH "/vendor/lib/dri/radeonsi_dri.so"
-#else
-#define DRI_PATH "/usr/lib64/dri/radeonsi_dri.so"
-#endif
+// clang-format off
+#define DRI_PATH STRINGIZE(DRI_DRIVER_DIR/radeonsi_dri.so)
+// clang-format on
#define TILE_TYPE_LINEAR 0
/* DRI backend decides tiling in this case. */
@@ -140,14 +138,49 @@ static void amdgpu_close(struct driver *drv)
drv->priv = NULL;
}
-static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
int ret;
uint32_t plane, stride;
- struct combination *combo;
union drm_amdgpu_gem_create gem_create;
+ stride = drv_stride_from_format(format, width, 0);
+ stride = ALIGN(stride, 256);
+
+ drv_bo_from_format(bo, stride, height, format);
+
+ memset(&gem_create, 0, sizeof(gem_create));
+ gem_create.in.bo_size = bo->meta.total_size;
+ gem_create.in.alignment = 256;
+ gem_create.in.domain_flags = 0;
+
+ if (use_flags & (BO_USE_LINEAR | BO_USE_SW_MASK))
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+ if (!(use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SCANOUT)))
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
+ /* Allocate the buffer with the preferred heap. */
+ ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
+ sizeof(gem_create));
+ if (ret < 0)
+ return ret;
+
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
+ bo->handles[plane].u32 = gem_create.out.handle;
+
+ bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+
+ return 0;
+}
+
+static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ struct combination *combo;
+
combo = drv_get_combination(bo->drv, format, use_flags);
if (!combo)
return -EINVAL;
@@ -177,43 +210,38 @@ static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint
return dri_bo_create(bo, width, height, format, use_flags);
}
- stride = drv_stride_from_format(format, width, 0);
- stride = ALIGN(stride, 256);
-
- drv_bo_from_format(bo, stride, height, format);
-
- memset(&gem_create, 0, sizeof(gem_create));
- gem_create.in.bo_size = bo->total_size;
- gem_create.in.alignment = 256;
- gem_create.in.domain_flags = 0;
-
- if (use_flags & (BO_USE_LINEAR | BO_USE_SW_MASK))
- gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ return amdgpu_create_bo_linear(bo, width, height, format, use_flags);
+}
- gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
- if (!(use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SCANOUT)))
- gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+static int amdgpu_create_bo_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
+ uint32_t format, const uint64_t *modifiers,
+ uint32_t count)
+{
+ bool only_use_linear = true;
- /* Allocate the buffer with the preferred heap. */
- ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
- sizeof(gem_create));
- if (ret < 0)
- return ret;
+ for (uint32_t i = 0; i < count; ++i)
+ if (modifiers[i] != DRM_FORMAT_MOD_LINEAR)
+ only_use_linear = false;
- for (plane = 0; plane < bo->num_planes; plane++)
- bo->handles[plane].u32 = gem_create.out.handle;
+ if (only_use_linear)
+ return amdgpu_create_bo_linear(bo, width, height, format, BO_USE_SCANOUT);
- return 0;
+ return dri_bo_create_with_modifiers(bo, width, height, format, modifiers, count);
}
static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
{
- struct combination *combo;
- combo = drv_get_combination(bo->drv, data->format, data->use_flags);
- if (!combo)
- return -EINVAL;
+ bool dri_tiling = data->format_modifiers[0] != DRM_FORMAT_MOD_LINEAR;
+ if (data->format_modifiers[0] == DRM_FORMAT_MOD_INVALID) {
+ struct combination *combo;
+ combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ dri_tiling = combo->metadata.tiling == TILE_TYPE_DRI;
+ }
- if (combo->metadata.tiling == TILE_TYPE_DRI)
+ if (dri_tiling)
return dri_bo_import(bo, data);
else
return drv_prime_bo_import(bo, data);
@@ -244,9 +272,9 @@ static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_
return MAP_FAILED;
}
- vma->length = bo->total_size;
+ vma->length = bo->meta.total_size;
- return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.out.addr_ptr);
}
@@ -305,12 +333,14 @@ const struct backend backend_amdgpu = {
.init = amdgpu_init,
.close = amdgpu_close,
.bo_create = amdgpu_create_bo,
+ .bo_create_with_modifiers = amdgpu_create_bo_with_modifiers,
.bo_destroy = amdgpu_destroy_bo,
.bo_import = amdgpu_import_bo,
.bo_map = amdgpu_map_bo,
.bo_unmap = amdgpu_unmap_bo,
.bo_invalidate = amdgpu_bo_invalidate,
.resolve_format = amdgpu_resolve_format,
+ .num_planes_from_modifier = dri_num_planes_from_modifier,
};
#endif
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc
index 0301af12d32..1066edccfda 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.cc
@@ -65,7 +65,18 @@ int32_t cros_gralloc_buffer::lock(const struct rectangle *rect, uint32_t map_fla
drv_bo_invalidate(bo_, lock_data_[0]);
vaddr = lock_data_[0]->vma->addr;
} else {
- vaddr = drv_bo_map(bo_, rect, map_flags, &lock_data_[0], 0);
+ struct rectangle r = *rect;
+
+ if (!r.width && !r.height && !r.x && !r.y) {
+ /*
+ * Android IMapper.hal: An accessRegion of all-zeros means the
+ * entire buffer.
+ */
+ r.width = drv_bo_get_width(bo_);
+ r.height = drv_bo_get_height(bo_);
+ }
+
+ vaddr = drv_bo_map(bo_, &r, map_flags, &lock_data_[0], 0);
}
if (vaddr == MAP_FAILED) {
@@ -97,3 +108,9 @@ int32_t cros_gralloc_buffer::unlock()
return 0;
}
+
+int32_t cros_gralloc_buffer::resource_info(uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES])
+{
+ return drv_resource_info(bo_, strides, offsets);
+}
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h
index e6aec91df7b..ebd72ec2cac 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_buffer.h
@@ -26,6 +26,7 @@ class cros_gralloc_buffer
int32_t lock(const struct rectangle *rect, uint32_t map_flags,
uint8_t *addr[DRV_MAX_PLANES]);
int32_t unlock();
+ int32_t resource_info(uint32_t strides[DRV_MAX_PLANES], uint32_t offsets[DRV_MAX_PLANES]);
private:
cros_gralloc_buffer(cros_gralloc_buffer const &);
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc
index 8a63864d2cb..62b43d4c4a3 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.cc
@@ -114,6 +114,15 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto
if (resolved_format == DRM_FORMAT_NV12)
use_flags |= BO_USE_LINEAR;
+ /*
+ * This unmask is a backup in the case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED is resolved
+ * to non-YUV formats.
+ */
+ if (descriptor->drm_format == DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED &&
+ (resolved_format == DRM_FORMAT_XBGR8888 || resolved_format == DRM_FORMAT_ABGR8888)) {
+ use_flags &= ~BO_USE_HW_VIDEO_ENCODER;
+ }
+
bo = drv_bo_create(drv_, descriptor->width, descriptor->height, resolved_format, use_flags);
if (!bo) {
drv_log("Failed to create bo.\n");
@@ -324,6 +333,26 @@ int32_t cros_gralloc_driver::get_backing_store(buffer_handle_t handle, uint64_t
return 0;
}
+int32_t cros_gralloc_driver::resource_info(buffer_handle_t handle, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES])
+{
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (!buffer) {
+ drv_log("Invalid Reference.\n");
+ return -EINVAL;
+ }
+
+ return buffer->resource_info(strides, offsets);
+}
+
cros_gralloc_buffer *cros_gralloc_driver::get_buffer(cros_gralloc_handle_t hnd)
{
/* Assumes driver mutex is held. */
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h
index 45782c93115..f0512777ce1 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_driver.h
@@ -31,6 +31,8 @@ class cros_gralloc_driver
int32_t unlock(buffer_handle_t handle, int32_t *release_fence);
int32_t get_backing_store(buffer_handle_t handle, uint64_t *out_store);
+ int32_t resource_info(buffer_handle_t handle, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES]);
private:
cros_gralloc_driver(cros_gralloc_driver const &);
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc
index 12daf4b8086..73e59cb5700 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/cros_gralloc_helpers.cc
@@ -39,6 +39,12 @@ uint32_t cros_gralloc_convert_format(int format)
*/
case HAL_PIXEL_FORMAT_BLOB:
return DRM_FORMAT_R8;
+#if ANDROID_VERSION >= 0x0a00
+ case HAL_PIXEL_FORMAT_RGBA_1010102:
+ return DRM_FORMAT_ABGR2101010;
+ case HAL_PIXEL_FORMAT_RGBA_FP16:
+ return DRM_FORMAT_ABGR16161616F;
+#endif
}
return DRM_FORMAT_NONE;
diff --git a/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc b/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc
index df1f62cb86c..6c49d3ae3e1 100644
--- a/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc
+++ b/chromium/third_party/minigbm/src/cros_gralloc/gralloc0/gralloc0.cc
@@ -4,6 +4,7 @@
* found in the LICENSE file.
*/
+#include "../../util.h"
#include "../cros_gralloc_driver.h"
#include <cassert>
@@ -33,6 +34,11 @@ enum {
};
// clang-format on
+// Gralloc0 doesn't define a video decoder flag. However, the IAllocator gralloc0
+// passthrough gives the low 32-bits of the BufferUsage flags to gralloc0 in their
+// entirety, so we can detect the video decoder flag passed by IAllocator clients.
+#define BUFFER_USAGE_VIDEO_DECODER (1 << 22)
+
static uint64_t gralloc0_convert_usage(int usage)
{
uint64_t use_flags = BO_USE_NONE;
@@ -66,15 +72,19 @@ static uint64_t gralloc0_convert_usage(int usage)
use_flags |= BO_USE_NONE;
if (usage & GRALLOC_USAGE_PROTECTED)
use_flags |= BO_USE_PROTECTED;
- if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)
+ if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) {
+ use_flags |= BO_USE_HW_VIDEO_ENCODER;
/*HACK: See b/30054495 */
use_flags |= BO_USE_SW_READ_OFTEN;
+ }
if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE)
use_flags |= BO_USE_CAMERA_WRITE;
if (usage & GRALLOC_USAGE_HW_CAMERA_READ)
use_flags |= BO_USE_CAMERA_READ;
if (usage & GRALLOC_USAGE_RENDERSCRIPT)
use_flags |= BO_USE_RENDERSCRIPT;
+ if (usage & BUFFER_USAGE_VIDEO_DECODER)
+ use_flags |= BO_USE_HW_VIDEO_DECODER;
return use_flags;
}
@@ -91,6 +101,13 @@ static uint32_t gralloc0_convert_map_usage(int map_usage)
return map_flags;
}
+static int gralloc0_droid_yuv_format(int droid_format)
+{
+
+ return (droid_format == HAL_PIXEL_FORMAT_YCbCr_420_888 ||
+ droid_format == HAL_PIXEL_FORMAT_YV12);
+}
+
static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usage,
buffer_handle_t *handle, int *stride)
{
@@ -111,6 +128,14 @@ static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usa
descriptor.use_flags &= ~BO_USE_SCANOUT;
supported = mod->driver->is_supported(&descriptor);
}
+ if (!supported && (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) &&
+ !gralloc0_droid_yuv_format(format)) {
+ // Unmask BO_USE_HW_VIDEO_ENCODER in the case of non-yuv formats
+ // because they are not input to a hw encoder but used as an
+ // intermediate format (e.g. camera).
+ descriptor.use_flags &= ~BO_USE_HW_VIDEO_ENCODER;
+ supported = mod->driver->is_supported(&descriptor);
+ }
if (!supported) {
drv_log("Unsupported combination -- HAL format: %u, HAL usage: %u, "
@@ -237,6 +262,8 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
uint64_t *out_store;
buffer_handle_t handle;
uint32_t *out_width, *out_height, *out_stride;
+ uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
+ uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
auto mod = (struct gralloc0_module const *)module;
switch (op) {
@@ -262,7 +289,17 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
switch (op) {
case GRALLOC_DRM_GET_STRIDE:
out_stride = va_arg(args, uint32_t *);
- *out_stride = hnd->pixel_stride;
+ ret = mod->driver->resource_info(handle, strides, offsets);
+ if (ret)
+ break;
+
+ if (strides[0] != hnd->strides[0]) {
+ uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(hnd->format, 0);
+ *out_stride = DIV_ROUND_UP(strides[0], bytes_per_pixel);
+ } else {
+ *out_stride = hnd->pixel_stride;
+ }
+
break;
case GRALLOC_DRM_GET_FORMAT:
out_format = va_arg(args, int32_t *);
@@ -340,6 +377,8 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
{
int32_t ret;
uint32_t map_flags;
+ uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
+ uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
uint8_t *addr[DRV_MAX_PLANES] = { nullptr, nullptr, nullptr, nullptr };
auto mod = (struct gralloc0_module const *)module;
struct rectangle rect = { .x = static_cast<uint32_t>(l),
@@ -353,9 +392,8 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
return -EINVAL;
}
- if ((hnd->droid_format != HAL_PIXEL_FORMAT_YCbCr_420_888) &&
- (hnd->droid_format != HAL_PIXEL_FORMAT_YV12) &&
- (hnd->droid_format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)) {
+ if (!gralloc0_droid_yuv_format(hnd->droid_format) &&
+ hnd->droid_format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
drv_log("Non-YUV format not compatible.\n");
return -EINVAL;
}
@@ -370,13 +408,22 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
if (ret)
return ret;
+ if (!map_flags) {
+ ret = mod->driver->resource_info(handle, strides, offsets);
+ if (ret)
+ return ret;
+
+ for (uint32_t plane = 0; plane < DRV_MAX_PLANES; plane++)
+ addr[plane] = static_cast<uint8_t *>(nullptr) + offsets[plane];
+ }
+
switch (hnd->format) {
case DRM_FORMAT_NV12:
ycbcr->y = addr[0];
ycbcr->cb = addr[1];
ycbcr->cr = addr[1] + 1;
- ycbcr->ystride = hnd->strides[0];
- ycbcr->cstride = hnd->strides[1];
+ ycbcr->ystride = (!map_flags) ? strides[0] : hnd->strides[0];
+ ycbcr->cstride = (!map_flags) ? strides[1] : hnd->strides[1];
ycbcr->chroma_step = 2;
break;
case DRM_FORMAT_YVU420:
@@ -384,8 +431,8 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
ycbcr->y = addr[0];
ycbcr->cb = addr[2];
ycbcr->cr = addr[1];
- ycbcr->ystride = hnd->strides[0];
- ycbcr->cstride = hnd->strides[1];
+ ycbcr->ystride = (!map_flags) ? strides[0] : hnd->strides[0];
+ ycbcr->cstride = (!map_flags) ? strides[1] : hnd->strides[1];
ycbcr->chroma_step = 1;
break;
default:
diff --git a/chromium/third_party/minigbm/src/dri.c b/chromium/third_party/minigbm/src/dri.c
index a9c1ed7d1b9..97dc567e6ba 100644
--- a/chromium/third_party/minigbm/src/dri.c
+++ b/chromium/third_party/minigbm/src/dri.c
@@ -65,41 +65,130 @@ static bool lookup_extension(const __DRIextension *const *extensions, const char
}
/*
+ * Close Gem Handle
+ */
+static void close_gem_handle(uint32_t handle, int fd)
+{
+ struct drm_gem_close gem_close;
+ int ret = 0;
+
+ memset(&gem_close, 0, sizeof(gem_close));
+ gem_close.handle = handle;
+ ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ if (ret)
+ drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n", handle, ret);
+}
+
+/*
* The DRI GEM namespace may be different from the minigbm's driver GEM namespace. We have
* to import into minigbm.
*/
static int import_into_minigbm(struct dri_driver *dri, struct bo *bo)
{
uint32_t handle;
- int prime_fd, ret;
+ int ret, modifier_upper, modifier_lower, num_planes, i, j;
+ off_t dmabuf_sizes[DRV_MAX_PLANES];
+ __DRIimage *plane_image = NULL;
+
+ if (dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
+ &modifier_upper) &&
+ dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_LOWER,
+ &modifier_lower)) {
+ bo->meta.format_modifiers[0] =
+ ((uint64_t)modifier_upper << 32) | (uint32_t)modifier_lower;
+ } else {
+ bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_INVALID;
+ }
- if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_FD, &prime_fd))
+ if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_NUM_PLANES,
+ &num_planes)) {
return -errno;
+ }
- ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle);
- if (ret) {
- drv_log("drmPrimeFDToHandle failed with %s\n", strerror(errno));
- return ret;
+ bo->meta.num_planes = num_planes;
+
+ for (i = 0; i < num_planes; ++i) {
+ int prime_fd, stride, offset;
+ plane_image = dri->image_extension->fromPlanar(bo->priv, i, NULL);
+ __DRIimage *image = plane_image ? plane_image : bo->priv;
+
+ if (i)
+ bo->meta.format_modifiers[i] = bo->meta.format_modifiers[0];
+
+ if (!dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride) ||
+ !dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ if (!dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_FD, &prime_fd)) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ dmabuf_sizes[i] = lseek(prime_fd, 0, SEEK_END);
+ if (dmabuf_sizes[i] == (off_t)-1) {
+ ret = -errno;
+ close(prime_fd);
+ goto cleanup;
+ }
+
+ lseek(prime_fd, 0, SEEK_SET);
+
+ ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle);
+
+ close(prime_fd);
+
+ if (ret) {
+ drv_log("drmPrimeFDToHandle failed with %s\n", strerror(errno));
+ goto cleanup;
+ }
+
+ bo->handles[i].u32 = handle;
+
+ bo->meta.strides[i] = stride;
+ bo->meta.offsets[i] = offset;
+
+ if (plane_image)
+ dri->image_extension->destroyImage(plane_image);
}
- bo->handles[0].u32 = handle;
- close(prime_fd);
- return 0;
-}
+ for (i = 0; i < num_planes; ++i) {
+ off_t next_plane = dmabuf_sizes[i];
+ for (j = 0; j < num_planes; ++j) {
+ if (bo->meta.offsets[j] < next_plane &&
+ bo->meta.offsets[j] > bo->meta.offsets[i] &&
+ bo->handles[j].u32 == bo->handles[i].u32)
+ next_plane = bo->meta.offsets[j];
+ }
-/*
- * Close Gem Handle
- */
-static void close_gem_handle(uint32_t handle, int fd)
-{
- struct drm_gem_close gem_close;
- int ret = 0;
+ bo->meta.sizes[i] = next_plane - bo->meta.offsets[i];
- memset(&gem_close, 0, sizeof(gem_close));
- gem_close.handle = handle;
- ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
- if (ret)
- drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n", handle, ret);
+ /* This is kind of misleading if different planes use
+ different dmabufs. */
+ bo->meta.total_size += bo->meta.sizes[i];
+ }
+
+ return 0;
+
+cleanup:
+ if (plane_image)
+ dri->image_extension->destroyImage(plane_image);
+ while (--i >= 0) {
+ for (j = 0; j <= i; ++j)
+ if (bo->handles[j].u32 == bo->handles[i].u32)
+ break;
+
+ /* Multiple equivalent handles) */
+ if (i == j)
+ break;
+
+ /* This kind of goes horribly wrong when we already imported
+ * the same handles earlier, as we should really reference
+ * count handles. */
+ close_gem_handle(bo->handles[i].u32, bo->drv->fd);
+ }
+ return ret;
}
/*
@@ -190,10 +279,9 @@ int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t forma
uint64_t use_flags)
{
unsigned int dri_use;
- int ret, dri_format, stride, offset;
+ int ret, dri_format;
struct dri_driver *dri = bo->drv->priv;
- assert(bo->num_planes == 1);
dri_format = drm_format_to_dri_format(format);
/* Gallium drivers require shared to get the handle and stride. */
@@ -216,24 +304,38 @@ int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t forma
if (ret)
goto free_image;
- if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_STRIDE, &stride)) {
- ret = -errno;
- goto close_handle;
+ return 0;
+
+free_image:
+ dri->image_extension->destroyImage(bo->priv);
+ return ret;
+}
+
+int dri_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ const uint64_t *modifiers, uint32_t modifier_count)
+{
+ int ret, dri_format;
+ struct dri_driver *dri = bo->drv->priv;
+
+ if (!dri->image_extension->createImageWithModifiers) {
+ return -ENOENT;
}
- if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) {
+ dri_format = drm_format_to_dri_format(format);
+
+ bo->priv = dri->image_extension->createImageWithModifiers(
+ dri->device, width, height, dri_format, modifiers, modifier_count, NULL);
+ if (!bo->priv) {
ret = -errno;
- goto close_handle;
+ return ret;
}
- bo->strides[0] = stride;
- bo->sizes[0] = stride * height;
- bo->offsets[0] = offset;
- bo->total_size = offset + bo->sizes[0];
+ ret = import_into_minigbm(dri, bo);
+ if (ret)
+ goto free_image;
+
return 0;
-close_handle:
- close_gem_handle(bo->handles[0].u32, bo->drv->fd);
free_image:
dri->image_extension->destroyImage(bo->priv);
return ret;
@@ -244,16 +346,41 @@ int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data)
int ret;
struct dri_driver *dri = bo->drv->priv;
- assert(bo->num_planes == 1);
-
- // clang-format off
- bo->priv = dri->image_extension->createImageFromFds(dri->device, data->width, data->height,
- data->format, data->fds, bo->num_planes,
- (int *)data->strides,
- (int *)data->offsets, NULL);
- // clang-format on
- if (!bo->priv)
- return -errno;
+ if (data->format_modifiers[0] != DRM_FORMAT_MOD_INVALID) {
+ unsigned error;
+
+ if (!dri->image_extension->createImageFromDmaBufs2)
+ return -ENOSYS;
+
+ // clang-format off
+ bo->priv = dri->image_extension->createImageFromDmaBufs2(dri->device, data->width, data->height,
+ data->format,
+ data->format_modifiers[0],
+ data->fds,
+ bo->meta.num_planes,
+ (int *)data->strides,
+ (int *)data->offsets,
+ __DRI_YUV_COLOR_SPACE_UNDEFINED,
+ __DRI_YUV_RANGE_UNDEFINED,
+ __DRI_YUV_CHROMA_SITING_UNDEFINED,
+ __DRI_YUV_CHROMA_SITING_UNDEFINED,
+ &error, NULL);
+ // clang-format on
+
+ /* Could translate the DRI error, but the Mesa GBM also returns ENOSYS. */
+ if (!bo->priv)
+ return -ENOSYS;
+ } else {
+ // clang-format off
+ bo->priv = dri->image_extension->createImageFromFds(dri->device, data->width, data->height,
+ data->format, data->fds,
+ bo->meta.num_planes,
+ (int *)data->strides,
+ (int *)data->offsets, NULL);
+ // clang-format on
+ if (!bo->priv)
+ return -errno;
+ }
ret = import_into_minigbm(dri, bo);
if (ret) {
@@ -289,9 +416,9 @@ void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flag
struct dri_driver *dri = bo->drv->priv;
/* GBM flags and DRI flags are the same. */
- vma->addr =
- dri->image_extension->mapImage(dri->context, bo->priv, 0, 0, bo->width, bo->height,
- map_flags, (int *)&vma->map_strides[plane], &vma->priv);
+ vma->addr = dri->image_extension->mapImage(dri->context, bo->priv, 0, 0, bo->meta.width,
+ bo->meta.height, map_flags,
+ (int *)&vma->map_strides[plane], &vma->priv);
if (!vma->addr)
return MAP_FAILED;
@@ -317,4 +444,22 @@ int dri_bo_unmap(struct bo *bo, struct vma *vma)
return 0;
}
+size_t dri_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier)
+{
+ struct dri_driver *dri = drv->priv;
+ if (!dri->image_extension->queryDmaBufFormatModifierAttribs) {
+ /* We do not do any modifier checks here. The create will fail
+ * later if the modifier is not supported. */
+ return drv_num_planes_from_format(format);
+ }
+
+ uint64_t planes;
+ GLboolean ret = dri->image_extension->queryDmaBufFormatModifierAttribs(
+ dri->device, format, modifier, __DRI_IMAGE_ATTRIB_NUM_PLANES, &planes);
+ if (!ret)
+ return 0;
+
+ return planes;
+}
+
#endif
diff --git a/chromium/third_party/minigbm/src/dri.h b/chromium/third_party/minigbm/src/dri.h
index f79de99c4a0..6218e827e6e 100644
--- a/chromium/third_party/minigbm/src/dri.h
+++ b/chromium/third_party/minigbm/src/dri.h
@@ -6,11 +6,11 @@
#ifdef DRV_AMDGPU
-typedef int GLint;
-typedef unsigned int GLuint;
-typedef unsigned char GLboolean;
-
+// Avoid transitively including a bunch of unnecessary headers.
+#define GL_GLEXT_LEGACY
#include "GL/internal/dri_interface.h"
+#undef GL_GLEXT_LEGACY
+
#include "drv.h"
struct dri_driver {
@@ -30,9 +30,12 @@ int dri_init(struct driver *drv, const char *dri_so_path, const char *driver_suf
void dri_close(struct driver *drv);
int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags);
+int dri_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ const uint64_t *modifiers, uint32_t modifier_count);
int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data);
int dri_bo_destroy(struct bo *bo);
void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags);
int dri_bo_unmap(struct bo *bo, struct vma *vma);
+size_t dri_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier);
#endif
diff --git a/chromium/third_party/minigbm/src/drv.c b/chromium/third_party/minigbm/src/drv.c
index fa157ef00e2..920cf4db4e7 100644
--- a/chromium/third_party/minigbm/src/drv.c
+++ b/chromium/third_party/minigbm/src/drv.c
@@ -54,6 +54,9 @@ extern const struct backend backend_radeon;
#ifdef DRV_ROCKCHIP
extern const struct backend backend_rockchip;
#endif
+#ifdef DRV_SYNAPTICS
+extern const struct backend backend_synaptics;
+#endif
#ifdef DRV_TEGRA
extern const struct backend backend_tegra;
#endif
@@ -104,6 +107,9 @@ static const struct backend *drv_get_backend(int fd)
#ifdef DRV_ROCKCHIP
&backend_rockchip,
#endif
+#ifdef DRV_SYNAPTICS
+ &backend_synaptics,
+#endif
#ifdef DRV_TEGRA
&backend_tegra,
#endif
@@ -114,11 +120,21 @@ static const struct backend *drv_get_backend(int fd)
&backend_vgem, &backend_virtio_gpu,
};
- for (i = 0; i < ARRAY_SIZE(backend_list); i++)
- if (!strcmp(drm_version->name, backend_list[i]->name)) {
+ for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
+ const struct backend *b = backend_list[i];
+ // Exactly one of the main create functions must be defined.
+ assert((b->bo_create != NULL) ^ (b->bo_create_from_metadata != NULL));
+ // Either both or neither must be implemented.
+ assert((b->bo_compute_metadata != NULL) == (b->bo_create_from_metadata != NULL));
+ // Both can't be defined, but it's okay for neither to be (i.e. only bo_create).
+ assert((b->bo_create_with_modifiers == NULL) ||
+ (b->bo_create_from_metadata == NULL));
+
+ if (!strcmp(drm_version->name, b->name)) {
drmFreeVersion(drm_version);
- return backend_list[i];
+ return b;
}
+ }
drmFreeVersion(drm_version);
return NULL;
@@ -223,7 +239,7 @@ struct combination *drv_get_combination(struct driver *drv, uint32_t format, uin
}
struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+ uint64_t use_flags, bool is_test_buffer)
{
struct bo *bo;
@@ -233,13 +249,14 @@ struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint3
return NULL;
bo->drv = drv;
- bo->width = width;
- bo->height = height;
- bo->format = format;
- bo->use_flags = use_flags;
- bo->num_planes = drv_num_planes_from_format(format);
-
- if (!bo->num_planes) {
+ bo->meta.width = width;
+ bo->meta.height = height;
+ bo->meta.format = format;
+ bo->meta.use_flags = use_flags;
+ bo->meta.num_planes = drv_num_planes_from_format(format);
+ bo->is_test_buffer = is_test_buffer;
+
+ if (!bo->meta.num_planes) {
free(bo);
return NULL;
}
@@ -253,13 +270,25 @@ struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, ui
int ret;
size_t plane;
struct bo *bo;
+ bool is_test_alloc;
- bo = drv_bo_new(drv, width, height, format, use_flags);
+ is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
+ use_flags &= ~BO_USE_TEST_ALLOC;
+
+ bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);
if (!bo)
return NULL;
- ret = drv->backend->bo_create(bo, width, height, format, use_flags);
+ ret = -EINVAL;
+ if (drv->backend->bo_compute_metadata) {
+ ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
+ 0);
+ if (!is_test_alloc && ret == 0)
+ ret = drv->backend->bo_create_from_metadata(bo);
+ } else if (!is_test_alloc) {
+ ret = drv->backend->bo_create(bo, width, height, format, use_flags);
+ }
if (ret) {
free(bo);
@@ -268,9 +297,9 @@ struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, ui
pthread_mutex_lock(&drv->driver_lock);
- for (plane = 0; plane < bo->num_planes; plane++) {
+ for (plane = 0; plane < bo->meta.num_planes; plane++) {
if (plane > 0)
- assert(bo->offsets[plane] >= bo->offsets[plane - 1]);
+ assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);
drv_increment_reference_count(drv, bo, plane);
}
@@ -287,17 +316,26 @@ struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint
size_t plane;
struct bo *bo;
- if (!drv->backend->bo_create_with_modifiers) {
+ if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
errno = ENOENT;
return NULL;
}
- bo = drv_bo_new(drv, width, height, format, BO_USE_NONE);
+ bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);
if (!bo)
return NULL;
- ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers, count);
+ ret = -EINVAL;
+ if (drv->backend->bo_compute_metadata) {
+ ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
+ modifiers, count);
+ if (ret == 0)
+ ret = drv->backend->bo_create_from_metadata(bo);
+ } else {
+ ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
+ count);
+ }
if (ret) {
free(bo);
@@ -306,9 +344,9 @@ struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint
pthread_mutex_lock(&drv->driver_lock);
- for (plane = 0; plane < bo->num_planes; plane++) {
+ for (plane = 0; plane < bo->meta.num_planes; plane++) {
if (plane > 0)
- assert(bo->offsets[plane] >= bo->offsets[plane - 1]);
+ assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);
drv_increment_reference_count(drv, bo, plane);
}
@@ -320,23 +358,27 @@ struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint
void drv_bo_destroy(struct bo *bo)
{
+ int ret;
size_t plane;
uintptr_t total = 0;
struct driver *drv = bo->drv;
- pthread_mutex_lock(&drv->driver_lock);
+ if (!bo->is_test_buffer) {
+ pthread_mutex_lock(&drv->driver_lock);
- for (plane = 0; plane < bo->num_planes; plane++)
- drv_decrement_reference_count(drv, bo, plane);
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
+ drv_decrement_reference_count(drv, bo, plane);
- for (plane = 0; plane < bo->num_planes; plane++)
- total += drv_get_reference_count(drv, bo, plane);
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
+ total += drv_get_reference_count(drv, bo, plane);
- pthread_mutex_unlock(&drv->driver_lock);
+ pthread_mutex_unlock(&drv->driver_lock);
- if (total == 0) {
- assert(drv_mapping_destroy(bo) == 0);
- bo->drv->backend->bo_destroy(bo);
+ if (total == 0) {
+ ret = drv_mapping_destroy(bo);
+ assert(ret == 0);
+ bo->drv->backend->bo_destroy(bo);
+ }
}
free(bo);
@@ -349,7 +391,7 @@ struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
struct bo *bo;
off_t seek_end;
- bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags);
+ bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);
if (!bo)
return NULL;
@@ -360,16 +402,16 @@ struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
return NULL;
}
- for (plane = 0; plane < bo->num_planes; plane++) {
+ for (plane = 0; plane < bo->meta.num_planes; plane++) {
pthread_mutex_lock(&bo->drv->driver_lock);
drv_increment_reference_count(bo->drv, bo, plane);
pthread_mutex_unlock(&bo->drv->driver_lock);
}
- for (plane = 0; plane < bo->num_planes; plane++) {
- bo->strides[plane] = data->strides[plane];
- bo->offsets[plane] = data->offsets[plane];
- bo->format_modifiers[plane] = data->format_modifiers[plane];
+ for (plane = 0; plane < bo->meta.num_planes; plane++) {
+ bo->meta.strides[plane] = data->strides[plane];
+ bo->meta.offsets[plane] = data->offsets[plane];
+ bo->meta.format_modifiers[plane] = data->format_modifiers[plane];
seek_end = lseek(data->fds[plane], 0, SEEK_END);
if (seek_end == (off_t)(-1)) {
@@ -378,17 +420,17 @@ struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
}
lseek(data->fds[plane], 0, SEEK_SET);
- if (plane == bo->num_planes - 1 || data->offsets[plane + 1] == 0)
- bo->sizes[plane] = seek_end - data->offsets[plane];
+ if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
+ bo->meta.sizes[plane] = seek_end - data->offsets[plane];
else
- bo->sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];
+ bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];
- if ((int64_t)bo->offsets[plane] + bo->sizes[plane] > seek_end) {
+ if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
drv_log("buffer size is too large.\n");
goto destroy_bo;
}
- bo->total_size += bo->sizes[plane];
+ bo->meta.total_size += bo->meta.sizes[plane];
}
return bo;
@@ -411,7 +453,11 @@ void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags
assert(rect->y + rect->height <= drv_bo_get_height(bo));
assert(BO_MAP_READ_WRITE & map_flags);
/* No CPU access for protected buffers. */
- assert(!(bo->use_flags & BO_USE_PROTECTED));
+ assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
+
+ if (bo->is_test_buffer) {
+ return MAP_FAILED;
+ }
memset(&mapping, 0, sizeof(mapping));
mapping.rect = *rect;
@@ -446,7 +492,7 @@ void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags
}
mapping.vma = calloc(1, sizeof(*mapping.vma));
- memcpy(mapping.vma->map_strides, bo->strides, sizeof(mapping.vma->map_strides));
+ memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
addr = bo->drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
if (addr == MAP_FAILED) {
*map_data = NULL;
@@ -520,7 +566,7 @@ int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
assert(mapping->vma);
assert(mapping->refcount > 0);
assert(mapping->vma->refcount > 0);
- assert(!(bo->use_flags & BO_USE_PROTECTED));
+ assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
if (bo->drv->backend->bo_flush)
ret = bo->drv->backend->bo_flush(bo, mapping);
@@ -532,22 +578,17 @@ int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
uint32_t drv_bo_get_width(struct bo *bo)
{
- return bo->width;
+ return bo->meta.width;
}
uint32_t drv_bo_get_height(struct bo *bo)
{
- return bo->height;
-}
-
-uint32_t drv_bo_get_stride_or_tiling(struct bo *bo)
-{
- return bo->tiling ? bo->tiling : drv_bo_get_plane_stride(bo, 0);
+ return bo->meta.height;
}
size_t drv_bo_get_num_planes(struct bo *bo)
{
- return bo->num_planes;
+ return bo->meta.num_planes;
}
union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
@@ -563,7 +604,11 @@ int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
{
int ret, fd;
- assert(plane < bo->num_planes);
+ assert(plane < bo->meta.num_planes);
+
+ if (bo->is_test_buffer) {
+ return -EINVAL;
+ }
ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);
@@ -576,31 +621,31 @@ int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
{
- assert(plane < bo->num_planes);
- return bo->offsets[plane];
+ assert(plane < bo->meta.num_planes);
+ return bo->meta.offsets[plane];
}
uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
{
- assert(plane < bo->num_planes);
- return bo->sizes[plane];
+ assert(plane < bo->meta.num_planes);
+ return bo->meta.sizes[plane];
}
uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
{
- assert(plane < bo->num_planes);
- return bo->strides[plane];
+ assert(plane < bo->meta.num_planes);
+ return bo->meta.strides[plane];
}
uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane)
{
- assert(plane < bo->num_planes);
- return bo->format_modifiers[plane];
+ assert(plane < bo->meta.num_planes);
+ return bo->meta.format_modifiers[plane];
}
uint32_t drv_bo_get_format(struct bo *bo)
{
- return bo->format;
+ return bo->meta.format;
}
uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
@@ -616,7 +661,11 @@ uint32_t drv_num_buffers_per_bo(struct bo *bo)
uint32_t count = 0;
size_t plane, p;
- for (plane = 0; plane < bo->num_planes; plane++) {
+ if (bo->is_test_buffer) {
+ return 0;
+ }
+
+ for (plane = 0; plane < bo->meta.num_planes; plane++) {
for (p = 0; p < plane; p++)
if (bo->handles[p].u32 == bo->handles[plane].u32)
break;
@@ -642,3 +691,17 @@ void drv_log_prefix(const char *prefix, const char *file, int line, const char *
#endif
va_end(args);
}
+
+int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES])
+{
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
+ strides[plane] = bo->meta.strides[plane];
+ offsets[plane] = bo->meta.offsets[plane];
+ }
+
+ if (bo->drv->backend->resource_info)
+ return bo->drv->backend->resource_info(bo, strides, offsets);
+
+ return 0;
+}
diff --git a/chromium/third_party/minigbm/src/drv.h b/chromium/third_party/minigbm/src/drv.h
index d3cf9d3e041..2b86aad80ff 100644
--- a/chromium/third_party/minigbm/src/drv.h
+++ b/chromium/third_party/minigbm/src/drv.h
@@ -12,6 +12,7 @@ extern "C" {
#endif
#include <drm_fourcc.h>
+#include <stdbool.h>
#include <stdint.h>
#define DRV_MAX_PLANES 4
@@ -23,22 +24,24 @@ extern "C" {
#define BO_USE_CURSOR (1ull << 1)
#define BO_USE_CURSOR_64X64 BO_USE_CURSOR
#define BO_USE_RENDERING (1ull << 2)
-#define BO_USE_LINEAR (1ull << 3)
-#define BO_USE_SW_READ_NEVER (1ull << 4)
-#define BO_USE_SW_READ_RARELY (1ull << 5)
-#define BO_USE_SW_READ_OFTEN (1ull << 6)
-#define BO_USE_SW_WRITE_NEVER (1ull << 7)
-#define BO_USE_SW_WRITE_RARELY (1ull << 8)
-#define BO_USE_SW_WRITE_OFTEN (1ull << 9)
-#define BO_USE_EXTERNAL_DISP (1ull << 10)
-#define BO_USE_PROTECTED (1ull << 11)
-#define BO_USE_HW_VIDEO_ENCODER (1ull << 12)
-#define BO_USE_CAMERA_WRITE (1ull << 13)
-#define BO_USE_CAMERA_READ (1ull << 14)
+/* Skip for GBM_BO_USE_WRITE */
+#define BO_USE_LINEAR (1ull << 4)
+#define BO_USE_TEXTURE (1ull << 5)
+#define BO_USE_CAMERA_WRITE (1ull << 6)
+#define BO_USE_CAMERA_READ (1ull << 7)
+#define BO_USE_PROTECTED (1ull << 8)
+#define BO_USE_SW_READ_OFTEN (1ull << 9)
+#define BO_USE_SW_READ_RARELY (1ull << 10)
+#define BO_USE_SW_WRITE_OFTEN (1ull << 11)
+#define BO_USE_SW_WRITE_RARELY (1ull << 12)
+#define BO_USE_HW_VIDEO_DECODER (1ull << 13)
+#define BO_USE_HW_VIDEO_ENCODER (1ull << 14)
+#define BO_USE_TEST_ALLOC (1ull << 15)
#define BO_USE_RENDERSCRIPT (1ull << 16)
-#define BO_USE_TEXTURE (1ull << 17)
-#define BO_USE_HW_VIDEO_DECODER (1ull << 18)
+/* Quirks for allocating a buffer. */
+#define BO_QUIRK_NONE 0
+#define BO_QUIRK_DUMB32BPP (1ull << 0)
/* Map flags */
#define BO_MAP_NONE 0
@@ -50,12 +53,16 @@ extern "C" {
* on the namespace of already defined formats, which can be done by using invalid
* fourcc codes.
*/
-
#define DRM_FORMAT_NONE fourcc_code('0', '0', '0', '0')
#define DRM_FORMAT_YVU420_ANDROID fourcc_code('9', '9', '9', '7')
#define DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED fourcc_code('9', '9', '9', '8')
#define DRM_FORMAT_FLEX_YCbCr_420_888 fourcc_code('9', '9', '9', '9')
+/* This is a 10-bit bayer format for private reprocessing on MediaTek ISP. It's
+ * a private RAW format that other DRM drivers will never support and thus
+ * making it not upstreamable (i.e., defined in official DRM headers). */
+#define DRM_FORMAT_MTISP_SXYZW10 fourcc_code('M', 'B', '1', '0')
+
// TODO(crbug.com/958181): remove this definition once drm_fourcc.h contains it.
#ifndef DRM_FORMAT_P010
#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0')
@@ -119,7 +126,7 @@ const char *drv_get_name(struct driver *drv);
struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags);
struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags);
+ uint64_t use_flags, bool is_test_buffer);
struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags);
@@ -144,8 +151,6 @@ uint32_t drv_bo_get_width(struct bo *bo);
uint32_t drv_bo_get_height(struct bo *bo);
-uint32_t drv_bo_get_stride_or_tiling(struct bo *bo);
-
size_t drv_bo_get_num_planes(struct bo *bo);
union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane);
@@ -170,8 +175,13 @@ uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_fl
size_t drv_num_planes_from_format(uint32_t format);
+size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier);
+
uint32_t drv_num_buffers_per_bo(struct bo *bo);
+int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES]);
+
#define drv_log(format, ...) \
do { \
drv_log_prefix("minigbm", __FILE__, __LINE__, format, ##__VA_ARGS__); \
diff --git a/chromium/third_party/minigbm/src/drv_priv.h b/chromium/third_party/minigbm/src/drv_priv.h
index 46e64905796..32c082d6692 100644
--- a/chromium/third_party/minigbm/src/drv_priv.h
+++ b/chromium/third_party/minigbm/src/drv_priv.h
@@ -8,33 +8,33 @@
#define DRV_PRIV_H
#include <pthread.h>
+#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include "drv.h"
-struct bo {
- struct driver *drv;
+struct bo_metadata {
uint32_t width;
uint32_t height;
uint32_t format;
uint32_t tiling;
size_t num_planes;
- union bo_handle handles[DRV_MAX_PLANES];
uint32_t offsets[DRV_MAX_PLANES];
uint32_t sizes[DRV_MAX_PLANES];
uint32_t strides[DRV_MAX_PLANES];
uint64_t format_modifiers[DRV_MAX_PLANES];
uint64_t use_flags;
size_t total_size;
- void *priv;
};
-struct kms_item {
- uint32_t format;
- uint64_t modifier;
- uint64_t use_flags;
+struct bo {
+ struct driver *drv;
+ struct bo_metadata meta;
+ bool is_test_buffer;
+ union bo_handle handles[DRV_MAX_PLANES];
+ void *priv;
};
struct format_metadata {
@@ -67,6 +67,11 @@ struct backend {
uint64_t use_flags);
int (*bo_create_with_modifiers)(struct bo *bo, uint32_t width, uint32_t height,
uint32_t format, const uint64_t *modifiers, uint32_t count);
+ // Either both or neither _metadata functions must be implemented.
+ // If the functions are implemented, bo_create and bo_create_with_modifiers must not be.
+ int (*bo_compute_metadata)(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags, const uint64_t *modifiers, uint32_t count);
+ int (*bo_create_from_metadata)(struct bo *bo);
int (*bo_destroy)(struct bo *bo);
int (*bo_import)(struct bo *bo, struct drv_import_fd_data *data);
void *(*bo_map)(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags);
@@ -74,19 +79,25 @@ struct backend {
int (*bo_invalidate)(struct bo *bo, struct mapping *mapping);
int (*bo_flush)(struct bo *bo, struct mapping *mapping);
uint32_t (*resolve_format)(struct driver *drv, uint32_t format, uint64_t use_flags);
+ size_t (*num_planes_from_modifier)(struct driver *drv, uint32_t format, uint64_t modifier);
+ int (*resource_info)(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES]);
};
// clang-format off
-#define BO_USE_RENDER_MASK BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERING | \
+#define BO_USE_RENDER_MASK (BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERING | \
BO_USE_RENDERSCRIPT | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
- BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE
+ BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE)
-#define BO_USE_TEXTURE_MASK BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERSCRIPT | \
+#define BO_USE_TEXTURE_MASK (BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERSCRIPT | \
BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
- BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE
+ BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE)
+
+#define BO_USE_SW_MASK (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
+ BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY)
-#define BO_USE_SW_MASK BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
- BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY
+#define BO_USE_NON_GPU_HW (BO_USE_SCANOUT | BO_USE_CAMERA_WRITE | BO_USE_CAMERA_READ | \
+ BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)
#ifndef DRM_FORMAT_MOD_LINEAR
#define DRM_FORMAT_MOD_LINEAR DRM_FORMAT_MOD_NONE
diff --git a/chromium/third_party/minigbm/src/exynos.c b/chromium/third_party/minigbm/src/exynos.c
index b2b4040db32..6a801071fdf 100644
--- a/chromium/third_party/minigbm/src/exynos.c
+++ b/chromium/third_party/minigbm/src/exynos.c
@@ -45,16 +45,16 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint
width = ALIGN(width, 16);
height = ALIGN(height, 32);
chroma_height = ALIGN(height / 2, 32);
- bo->strides[0] = bo->strides[1] = width;
+ bo->meta.strides[0] = bo->meta.strides[1] = width;
/* MFC v8+ requires 64 byte padding in the end of luma and chroma buffers. */
- bo->sizes[0] = bo->strides[0] * height + 64;
- bo->sizes[1] = bo->strides[1] * chroma_height + 64;
- bo->offsets[0] = bo->offsets[1] = 0;
- bo->total_size = bo->sizes[0] + bo->sizes[1];
+ bo->meta.sizes[0] = bo->meta.strides[0] * height + 64;
+ bo->meta.sizes[1] = bo->meta.strides[1] * chroma_height + 64;
+ bo->meta.offsets[0] = bo->meta.offsets[1] = 0;
+ bo->meta.total_size = bo->meta.sizes[0] + bo->meta.sizes[1];
} else if (format == DRM_FORMAT_XRGB8888 || format == DRM_FORMAT_ARGB8888) {
- bo->strides[0] = drv_stride_from_format(format, width, 0);
- bo->total_size = bo->sizes[0] = height * bo->strides[0];
- bo->offsets[0] = 0;
+ bo->meta.strides[0] = drv_stride_from_format(format, width, 0);
+ bo->meta.total_size = bo->meta.sizes[0] = height * bo->meta.strides[0];
+ bo->meta.offsets[0] = 0;
} else {
drv_log("unsupported format %X\n", format);
assert(0);
@@ -62,8 +62,8 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint
}
int ret;
- for (plane = 0; plane < bo->num_planes; plane++) {
- size_t size = bo->sizes[plane];
+ for (plane = 0; plane < bo->meta.num_planes; plane++) {
+ size_t size = bo->meta.sizes[plane];
struct drm_exynos_gem_create gem_create;
memset(&gem_create, 0, sizeof(gem_create));
diff --git a/chromium/third_party/minigbm/src/gbm.c b/chromium/third_party/minigbm/src/gbm.c
index 7f9ed995638..ab5b3f7049a 100644
--- a/chromium/third_party/minigbm/src/gbm.c
+++ b/chromium/third_party/minigbm/src/gbm.c
@@ -41,6 +41,12 @@ PUBLIC int gbm_device_is_format_supported(struct gbm_device *gbm, uint32_t forma
return (drv_get_combination(gbm->drv, format, use_flags) != NULL);
}
+PUBLIC int gbm_device_get_format_modifier_plane_count(struct gbm_device *gbm, uint32_t format,
+ uint64_t modifier)
+{
+ return 0;
+}
+
PUBLIC struct gbm_device *gbm_create_device(int fd)
{
struct gbm_device *gbm;
@@ -76,9 +82,15 @@ PUBLIC struct gbm_surface *gbm_surface_create(struct gbm_device *gbm, uint32_t w
return surface;
}
-PUBLIC void gbm_surface_destroy(struct gbm_surface *surface)
+PUBLIC struct gbm_surface *gbm_surface_create_with_modifiers(struct gbm_device *gbm, uint32_t width,
+ uint32_t height, uint32_t format,
+ const uint64_t *modifiers,
+ const unsigned int count)
{
- free(surface);
+ if (count != 0 || modifiers != NULL)
+ return NULL;
+
+ return gbm_surface_create(gbm, width, height, format, 0);
}
PUBLIC struct gbm_bo *gbm_surface_lock_front_buffer(struct gbm_surface *surface)
@@ -90,6 +102,16 @@ PUBLIC void gbm_surface_release_buffer(struct gbm_surface *surface, struct gbm_b
{
}
+PUBLIC int gbm_surface_has_free_buffers(struct gbm_surface *surface)
+{
+ return 0;
+}
+
+PUBLIC void gbm_surface_destroy(struct gbm_surface *surface)
+{
+ free(surface);
+}
+
static struct gbm_bo *gbm_bo_new(struct gbm_device *gbm, uint32_t format)
{
struct gbm_bo *bo;
@@ -117,6 +139,14 @@ PUBLIC struct gbm_bo *gbm_bo_create(struct gbm_device *gbm, uint32_t width, uint
if (!bo)
return NULL;
+ /*
+ * HACK: This is for HAL_PIXEL_FORMAT_YV12 buffers allocated by arcvm.
+ * None of our platforms can display YV12, so we can treat as a SW buffer.
+ * Remove once this can be intelligently resolved in the guest.
+ */
+ if (format == GBM_FORMAT_YVU420 && (usage & GBM_BO_USE_LINEAR))
+ format = DRM_FORMAT_YVU420_ANDROID;
+
bo->bo = drv_bo_create(gbm->drv, width, height, format, gbm_convert_usage(usage));
if (!bo->bo) {
@@ -166,7 +196,6 @@ PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void
struct gbm_bo *bo;
struct drv_import_fd_data drv_data;
struct gbm_import_fd_data *fd_data = buffer;
- struct gbm_import_fd_planar_data *fd_planar_data = buffer;
struct gbm_import_fd_modifier_data *fd_modifier_data = buffer;
uint32_t gbm_format;
size_t num_planes, i, num_fds;
@@ -181,13 +210,17 @@ PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void
drv_data.format = fd_data->format;
drv_data.fds[0] = fd_data->fd;
drv_data.strides[0] = fd_data->stride;
+
+ for (i = 0; i < GBM_MAX_PLANES; ++i)
+ drv_data.format_modifiers[i] = DRM_FORMAT_MOD_INVALID;
break;
case GBM_BO_IMPORT_FD_MODIFIER:
gbm_format = fd_modifier_data->format;
drv_data.width = fd_modifier_data->width;
drv_data.height = fd_modifier_data->height;
drv_data.format = fd_modifier_data->format;
- num_planes = drv_num_planes_from_format(drv_data.format);
+ num_planes = drv_num_planes_from_modifier(gbm->drv, drv_data.format,
+ fd_modifier_data->modifier);
assert(num_planes);
num_fds = fd_modifier_data->num_fds;
@@ -208,26 +241,6 @@ PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void
drv_data.fds[i] = -1;
break;
- case GBM_BO_IMPORT_FD_PLANAR:
- gbm_format = fd_planar_data->format;
- drv_data.width = fd_planar_data->width;
- drv_data.height = fd_planar_data->height;
- drv_data.format = fd_planar_data->format;
- num_planes = drv_num_planes_from_format(drv_data.format);
-
- assert(num_planes);
-
- for (i = 0; i < num_planes; i++) {
- drv_data.fds[i] = fd_planar_data->fds[i];
- drv_data.offsets[i] = fd_planar_data->offsets[i];
- drv_data.strides[i] = fd_planar_data->strides[i];
- drv_data.format_modifiers[i] = fd_planar_data->format_modifiers[i];
- }
-
- for (i = num_planes; i < GBM_MAX_PLANES; i++)
- drv_data.fds[i] = -1;
-
- break;
default:
return NULL;
}
@@ -250,30 +263,6 @@ PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void
return bo;
}
-PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t transfer_flags, uint32_t *stride, void **map_data, size_t plane)
-{
- void *addr;
- off_t offset;
- uint32_t map_flags;
- struct rectangle rect = { .x = x, .y = y, .width = width, .height = height };
- if (!bo || width == 0 || height == 0 || !stride || !map_data)
- return NULL;
-
- map_flags = (transfer_flags & GBM_BO_TRANSFER_READ) ? BO_MAP_READ : BO_MAP_NONE;
- map_flags |= (transfer_flags & GBM_BO_TRANSFER_WRITE) ? BO_MAP_WRITE : BO_MAP_NONE;
-
- addr = drv_bo_map(bo->bo, &rect, map_flags, (struct mapping **)map_data, plane);
- if (addr == MAP_FAILED)
- return MAP_FAILED;
-
- *stride = ((struct mapping *)*map_data)->vma->map_strides[plane];
-
- offset = *stride * rect.y;
- offset += rect.x * drv_bytes_per_pixel_from_format(bo->gbm_format, plane);
- return (void *)((uint8_t *)addr + offset);
-}
-
PUBLIC void gbm_bo_unmap(struct gbm_bo *bo, void *map_data)
{
assert(bo);
@@ -295,24 +284,19 @@ PUBLIC uint32_t gbm_bo_get_stride(struct gbm_bo *bo)
return gbm_bo_get_stride_for_plane(bo, 0);
}
-PUBLIC uint32_t gbm_bo_get_stride_or_tiling(struct gbm_bo *bo)
-{
- return drv_bo_get_stride_or_tiling(bo->bo);
-}
-
PUBLIC uint32_t gbm_bo_get_format(struct gbm_bo *bo)
{
return bo->gbm_format;
}
-PUBLIC uint64_t gbm_bo_get_format_modifier(struct gbm_bo *bo)
+PUBLIC uint32_t gbm_bo_get_bpp(struct gbm_bo *bo)
{
- return gbm_bo_get_modifier(bo);
+ return drv_bytes_per_pixel_from_format(drv_bo_get_format(bo->bo), 0);
}
PUBLIC uint64_t gbm_bo_get_modifier(struct gbm_bo *bo)
{
- return gbm_bo_get_plane_format_modifier(bo, 0);
+ return drv_bo_get_plane_format_modifier(bo->bo, 0);
}
PUBLIC struct gbm_device *gbm_bo_get_device(struct gbm_bo *bo)
@@ -330,69 +314,112 @@ PUBLIC int gbm_bo_get_fd(struct gbm_bo *bo)
return gbm_bo_get_plane_fd(bo, 0);
}
-PUBLIC size_t gbm_bo_get_num_planes(struct gbm_bo *bo)
-{
- return gbm_bo_get_plane_count(bo);
-}
-
-PUBLIC size_t gbm_bo_get_plane_count(struct gbm_bo *bo)
+PUBLIC int gbm_bo_get_plane_count(struct gbm_bo *bo)
{
return drv_bo_get_num_planes(bo->bo);
}
-PUBLIC union gbm_bo_handle gbm_bo_get_plane_handle(struct gbm_bo *bo, size_t plane)
+PUBLIC union gbm_bo_handle gbm_bo_get_handle_for_plane(struct gbm_bo *bo, size_t plane)
{
- return gbm_bo_get_handle_for_plane(bo, plane);
+ return (union gbm_bo_handle)drv_bo_get_plane_handle(bo->bo, (size_t)plane).u64;
}
-PUBLIC union gbm_bo_handle gbm_bo_get_handle_for_plane(struct gbm_bo *bo, size_t plane)
+PUBLIC uint32_t gbm_bo_get_offset(struct gbm_bo *bo, size_t plane)
{
- return (union gbm_bo_handle)drv_bo_get_plane_handle(bo->bo, plane).u64;
+ return drv_bo_get_plane_offset(bo->bo, (size_t)plane);
}
-PUBLIC int gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane)
+PUBLIC uint32_t gbm_bo_get_stride_for_plane(struct gbm_bo *bo, size_t plane)
{
- return drv_bo_get_plane_fd(bo->bo, plane);
+ return drv_bo_get_plane_stride(bo->bo, (size_t)plane);
}
-PUBLIC uint32_t gbm_bo_get_plane_offset(struct gbm_bo *bo, size_t plane)
+PUBLIC void gbm_bo_set_user_data(struct gbm_bo *bo, void *data,
+ void (*destroy_user_data)(struct gbm_bo *, void *))
{
- return gbm_bo_get_offset(bo, plane);
+ bo->user_data = data;
+ bo->destroy_user_data = destroy_user_data;
}
-PUBLIC uint32_t gbm_bo_get_offset(struct gbm_bo *bo, size_t plane)
+PUBLIC void *gbm_bo_get_user_data(struct gbm_bo *bo)
{
- return drv_bo_get_plane_offset(bo->bo, plane);
+ return bo->user_data;
}
-PUBLIC uint32_t gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane)
+/* The two GBM_BO_FORMAT_[XA]RGB8888 formats alias the GBM_FORMAT_*
+ * formats of the same name. We want to accept them whenever someone
+ * has a GBM format, but never return them to the user.
+ */
+static uint32_t gbm_format_canonicalize(uint32_t gbm_format)
{
- return drv_bo_get_plane_size(bo->bo, plane);
+ switch (gbm_format) {
+ case GBM_BO_FORMAT_XRGB8888:
+ return GBM_FORMAT_XRGB8888;
+ case GBM_BO_FORMAT_ARGB8888:
+ return GBM_FORMAT_ARGB8888;
+ default:
+ return gbm_format;
+ }
}
-PUBLIC uint32_t gbm_bo_get_plane_stride(struct gbm_bo *bo, size_t plane)
+/**
+ * Returns a string representing the fourcc format name.
+ */
+PUBLIC char *gbm_format_get_name(uint32_t gbm_format, struct gbm_format_name_desc *desc)
{
- return gbm_bo_get_stride_for_plane(bo, plane);
+ gbm_format = gbm_format_canonicalize(gbm_format);
+
+ desc->name[0] = gbm_format;
+ desc->name[1] = gbm_format >> 8;
+ desc->name[2] = gbm_format >> 16;
+ desc->name[3] = gbm_format >> 24;
+ desc->name[4] = 0;
+
+ return desc->name;
}
-PUBLIC uint32_t gbm_bo_get_stride_for_plane(struct gbm_bo *bo, size_t plane)
+/*
+ * The following functions are not deprecated, but not in the Mesa the gbm
+ * header. The main difference is minigbm allows for the possibility of
+ * disjoint YUV images, while Mesa GBM does not.
+ */
+PUBLIC uint32_t gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane)
{
- return drv_bo_get_plane_stride(bo->bo, plane);
+ return drv_bo_get_plane_size(bo->bo, plane);
}
-PUBLIC uint64_t gbm_bo_get_plane_format_modifier(struct gbm_bo *bo, size_t plane)
+PUBLIC int gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane)
{
- return drv_bo_get_plane_format_modifier(bo->bo, plane);
+ return drv_bo_get_plane_fd(bo->bo, plane);
}
-PUBLIC void gbm_bo_set_user_data(struct gbm_bo *bo, void *data,
- void (*destroy_user_data)(struct gbm_bo *, void *))
+PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+ uint32_t transfer_flags, uint32_t *stride, void **map_data, size_t plane)
{
- bo->user_data = data;
- bo->destroy_user_data = destroy_user_data;
+ return gbm_bo_map2(bo, x, y, width, height, transfer_flags, stride, map_data, plane);
}
-PUBLIC void *gbm_bo_get_user_data(struct gbm_bo *bo)
+PUBLIC void *gbm_bo_map2(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+ uint32_t transfer_flags, uint32_t *stride, void **map_data, int plane)
{
- return bo->user_data;
+ void *addr;
+ off_t offset;
+ uint32_t map_flags;
+ plane = (size_t)plane;
+ struct rectangle rect = { .x = x, .y = y, .width = width, .height = height };
+ if (!bo || width == 0 || height == 0 || !stride || !map_data)
+ return NULL;
+
+ map_flags = (transfer_flags & GBM_BO_TRANSFER_READ) ? BO_MAP_READ : BO_MAP_NONE;
+ map_flags |= (transfer_flags & GBM_BO_TRANSFER_WRITE) ? BO_MAP_WRITE : BO_MAP_NONE;
+
+ addr = drv_bo_map(bo->bo, &rect, map_flags, (struct mapping **)map_data, plane);
+ if (addr == MAP_FAILED)
+ return MAP_FAILED;
+
+ *stride = ((struct mapping *)*map_data)->vma->map_strides[plane];
+
+ offset = *stride * rect.y;
+ offset += rect.x * drv_bytes_per_pixel_from_format(bo->gbm_format, plane);
+ return (void *)((uint8_t *)addr + offset);
}
diff --git a/chromium/third_party/minigbm/src/gbm.h b/chromium/third_party/minigbm/src/gbm.h
index a2f10f64553..24927288e18 100644
--- a/chromium/third_party/minigbm/src/gbm.h
+++ b/chromium/third_party/minigbm/src/gbm.h
@@ -28,20 +28,16 @@
#ifndef _GBM_H_
#define _GBM_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
#define __GBM__ 1
-#ifndef MINIGBM
-#define MINIGBM
-#endif
-
#include <stddef.h>
#include <stdint.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
/**
* \file gbm.h
* \brief Generic Buffer Manager
@@ -73,8 +69,20 @@ union gbm_bo_handle {
uint64_t u64;
};
-#define GBM_MAX_PLANES 4
+/** Format of the allocated buffer */
+enum gbm_bo_format {
+ /** RGB with 8 bits per channel in a 32 bit value */
+ GBM_BO_FORMAT_XRGB8888,
+ /** ARGB with 8 bits per channel in a 32 bit value */
+ GBM_BO_FORMAT_ARGB8888
+};
+
+/**
+ * The FourCC format codes are taken from the drm_fourcc.h definition, and
+ * re-namespaced. New GBM formats must not be added, unless they are
+ * identical ports from drm_fourcc.
+ */
#define __gbm_fourcc_code(a,b,c,d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
@@ -87,7 +95,6 @@ union gbm_bo_handle {
#define GBM_FORMAT_R8 __gbm_fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
/* 16 bpp RG */
-#define GBM_FORMAT_RG88 __gbm_fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
#define GBM_FORMAT_GR88 __gbm_fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
/* 8 bpp RGB */
@@ -143,6 +150,15 @@ union gbm_bo_handle {
#define GBM_FORMAT_RGBA1010102 __gbm_fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define GBM_FORMAT_BGRA1010102 __gbm_fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/*
+ * Floating point 64bpp RGB
+ * IEEE 754-2008 binary16 half-precision float
+ * [15:0] sign:exponent:mantissa 1:5:10
+ */
+#define GBM_FORMAT_XBGR16161616F __gbm_fourcc_code('X', 'B', '4', 'H') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define GBM_FORMAT_ABGR16161616F __gbm_fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
/* packed YCbCr */
#define GBM_FORMAT_YUYV __gbm_fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define GBM_FORMAT_YVYU __gbm_fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
@@ -183,28 +199,9 @@ union gbm_bo_handle {
#define GBM_FORMAT_YUV444 __gbm_fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define GBM_FORMAT_YVU444 __gbm_fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
-/*
- * Format Modifiers:
- *
- * Format modifiers describe, typically, a re-ordering or modification
- * of the data in a plane of an FB. This can be used to express tiled/
- * swizzled formats, or compression, or a combination of the two.
- *
- * The upper 8 bits of the format modifier are a vendor-id as assigned
- * below. The lower 56 bits are assigned as vendor sees fit.
- */
-
-/* Vendor Ids: */
-#define GBM_FORMAT_MOD_NONE 0
-#define GBM_FORMAT_MOD_VENDOR_INTEL 0x01
-#define GBM_FORMAT_MOD_VENDOR_AMD 0x02
-#define GBM_FORMAT_MOD_VENDOR_NV 0x03
-#define GBM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
-#define GBM_FORMAT_MOD_VENDOR_QCOM 0x05
-/* add more to the end as needed */
-
-#define gbm_fourcc_mod_code(vendor, val) \
- ((((__u64)GBM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
+struct gbm_format_name_desc {
+ char name[5];
+};
/**
* Flags to indicate the intended use for the buffer - these are passed into
@@ -233,17 +230,14 @@ enum gbm_bo_flags {
*/
GBM_BO_USE_RENDERING = (1 << 2),
/**
- * Deprecated
+ * Buffer can be used for gbm_bo_write. This is guaranteed to work
+ * with GBM_BO_USE_CURSOR, but may not work for other combinations.
*/
GBM_BO_USE_WRITE = (1 << 3),
/**
- * Buffer is guaranteed to be laid out linearly in memory. That is, the
- * buffer is laid out as an array with 'height' blocks, each block with
- * length 'stride'. Each stride is in the same order as the rows of the
- * buffer. This is intended to be used with buffers that will be accessed
- * via dma-buf mmap().
+ * Buffer is linear, i.e. not tiled.
*/
- GBM_BO_USE_LINEAR = (1 << 4),
+ GBM_BO_USE_LINEAR = (1 << 4),
/**
* The buffer will be used as a texture that will be sampled from.
*/
@@ -277,6 +271,15 @@ enum gbm_bo_flags {
* The buffer will be read by a video encode accelerator.
*/
GBM_BO_USE_HW_VIDEO_ENCODER = (1 << 14),
+
+ /**
+ * If this flag is set, no backing memory will be allocated for the
+ * created buffer. The metadata of the buffer (e.g. size) can be
+ * queried, and the values will be equal to a buffer allocated with
+ * the same arguments minus this flag. However, any methods
+ * which would otherwise access the underlying buffer will fail.
+ */
+ GBM_TEST_ALLOC = (1 << 15),
};
int
@@ -289,6 +292,11 @@ int
gbm_device_is_format_supported(struct gbm_device *gbm,
uint32_t format, uint32_t usage);
+int
+gbm_device_get_format_modifier_plane_count(struct gbm_device *gbm,
+ uint32_t format,
+ uint64_t modifier);
+
void
gbm_device_destroy(struct gbm_device *gbm);
@@ -304,8 +312,8 @@ struct gbm_bo *
gbm_bo_create_with_modifiers(struct gbm_device *gbm,
uint32_t width, uint32_t height,
uint32_t format,
- const uint64_t *modifiers, uint32_t count);
-
+ const uint64_t *modifiers,
+ const unsigned int count);
#define GBM_BO_IMPORT_WL_BUFFER 0x5501
#define GBM_BO_IMPORT_EGL_IMAGE 0x5502
#define GBM_BO_IMPORT_FD 0x5503
@@ -321,6 +329,8 @@ struct gbm_import_fd_data {
uint32_t format;
};
+#define GBM_MAX_PLANES 4
+
struct gbm_import_fd_modifier_data {
uint32_t width;
uint32_t height;
@@ -332,17 +342,6 @@ struct gbm_import_fd_modifier_data {
uint64_t modifier;
};
-// Deprecated. Use gbm_import_fd_modifier_data instead.
-struct gbm_import_fd_planar_data {
- int fds[GBM_MAX_PLANES];
- uint32_t width;
- uint32_t height;
- uint32_t format;
- uint32_t strides[GBM_MAX_PLANES];
- uint32_t offsets[GBM_MAX_PLANES];
- uint64_t format_modifiers[GBM_MAX_PLANES];
-};
-
struct gbm_bo *
gbm_bo_import(struct gbm_device *gbm, uint32_t type,
void *buffer, uint32_t usage);
@@ -374,11 +373,6 @@ enum gbm_bo_transfer_flags {
GBM_BO_TRANSFER_READ_WRITE = (GBM_BO_TRANSFER_READ | GBM_BO_TRANSFER_WRITE),
};
-void *
-gbm_bo_map(struct gbm_bo *bo,
- uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t flags, uint32_t *stride, void **map_data, size_t plane);
-
void
gbm_bo_unmap(struct gbm_bo *bo, void *map_data);
@@ -391,19 +385,17 @@ gbm_bo_get_height(struct gbm_bo *bo);
uint32_t
gbm_bo_get_stride(struct gbm_bo *bo);
-/* Tegra bringup hack to pass tiling parameters at EGLImage creation. */
uint32_t
-gbm_bo_get_stride_or_tiling(struct gbm_bo *bo);
+gbm_bo_get_stride_for_plane(struct gbm_bo *bo, size_t plane);
uint32_t
gbm_bo_get_format(struct gbm_bo *bo);
-/* Deprecated */
-uint64_t
-gbm_bo_get_format_modifier(struct gbm_bo *bo);
+uint32_t
+gbm_bo_get_bpp(struct gbm_bo *bo);
-uint64_t
-gbm_bo_get_modifier(struct gbm_bo *bo);
+uint32_t
+gbm_bo_get_offset(struct gbm_bo *bo, size_t plane);
struct gbm_device *
gbm_bo_get_device(struct gbm_bo *bo);
@@ -414,42 +406,17 @@ gbm_bo_get_handle(struct gbm_bo *bo);
int
gbm_bo_get_fd(struct gbm_bo *bo);
-/* Deprecated */
-size_t
-gbm_bo_get_num_planes(struct gbm_bo *bo);
+uint64_t
+gbm_bo_get_modifier(struct gbm_bo *bo);
-size_t
+int
gbm_bo_get_plane_count(struct gbm_bo *bo);
-/* Deprecated */
union gbm_bo_handle
-gbm_bo_get_plane_handle(struct gbm_bo *bo, size_t plane);
-
-union gbm_bo_handle
-gbm_bo_get_handle_for_plane(struct gbm_bo* bo, size_t plane);
+gbm_bo_get_handle_for_plane(struct gbm_bo *bo, size_t plane);
int
-gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane);
-
-/* Deprecated */
-uint32_t
-gbm_bo_get_plane_offset(struct gbm_bo *bo, size_t plane);
-
-uint32_t
-gbm_bo_get_offset(struct gbm_bo *bo, size_t plane);
-
-uint32_t
-gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane);
-
-/* Deprecated */
-uint32_t
-gbm_bo_get_plane_stride(struct gbm_bo *bo, size_t plane);
-
-uint32_t
-gbm_bo_get_stride_for_plane(struct gbm_bo *bo, size_t plane);
-
-uint64_t
-gbm_bo_get_plane_format_modifier(struct gbm_bo *bo, size_t plane);
+gbm_bo_write(struct gbm_bo *bo, const void *buf, size_t count);
void
gbm_bo_set_user_data(struct gbm_bo *bo, void *data,
@@ -466,6 +433,13 @@ gbm_surface_create(struct gbm_device *gbm,
uint32_t width, uint32_t height,
uint32_t format, uint32_t flags);
+struct gbm_surface *
+gbm_surface_create_with_modifiers(struct gbm_device *gbm,
+ uint32_t width, uint32_t height,
+ uint32_t format,
+ const uint64_t *modifiers,
+ const unsigned int count);
+
struct gbm_bo *
gbm_surface_lock_front_buffer(struct gbm_surface *surface);
@@ -478,6 +452,33 @@ gbm_surface_has_free_buffers(struct gbm_surface *surface);
void
gbm_surface_destroy(struct gbm_surface *surface);
+char *
+gbm_format_get_name(uint32_t gbm_format, struct gbm_format_name_desc *desc);
+
+
+#ifndef MINIGBM
+#define MINIGBM
+#endif
+/*
+ * The following functions are not deprecated, but not in the Mesa gbm
+ * header. The main difference is minigbm allows for the possibility of
+ * disjoint YUV images, while Mesa GBM does not.
+ */
+uint32_t
+gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane);
+
+int
+gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane);
+
+void *
+gbm_bo_map(struct gbm_bo *bo,
+ uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+ uint32_t flags, uint32_t *stride, void **map_data, size_t plane);
+void *
+gbm_bo_map2(struct gbm_bo *bo,
+ uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+ uint32_t flags, uint32_t *stride, void **map_data, int plane);
+
#ifdef __cplusplus
}
#endif
diff --git a/chromium/third_party/minigbm/src/gbm.pc b/chromium/third_party/minigbm/src/gbm.pc
index e99ac83d2b2..a7509fcbebd 100644
--- a/chromium/third_party/minigbm/src/gbm.pc
+++ b/chromium/third_party/minigbm/src/gbm.pc
@@ -5,6 +5,6 @@ libdir=${exec_prefix}/lib
Name: libgbm
Description: A small gbm implementation
-Version: 0
+Version: 18.0.0
Cflags: -I${includedir}
Libs: -L${libdir} -lgbm
diff --git a/chromium/third_party/minigbm/src/helpers.c b/chromium/third_party/minigbm/src/helpers.c
index 992eeb74290..22a61068027 100644
--- a/chromium/third_party/minigbm/src/helpers.c
+++ b/chromium/third_party/minigbm/src/helpers.c
@@ -11,7 +11,6 @@
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
-#include <xf86drmMode.h>
#include "drv_priv.h"
#include "helpers.h"
@@ -54,6 +53,13 @@ static const struct planar_layout packed_4bpp_layout = {
.bytes_per_pixel = { 4 }
};
+static const struct planar_layout packed_8bpp_layout = {
+ .num_planes = 1,
+ .horizontal_subsampling = { 1 },
+ .vertical_subsampling = { 1 },
+ .bytes_per_pixel = { 8 }
+};
+
static const struct planar_layout biplanar_yuv_420_layout = {
.num_planes = 2,
.horizontal_subsampling = { 1, 2 },
@@ -121,6 +127,7 @@ static const struct planar_layout *layout_from_format(uint32_t format)
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_MTISP_SXYZW10:
return &packed_2bpp_layout;
case DRM_FORMAT_BGR888:
@@ -146,6 +153,9 @@ static const struct planar_layout *layout_from_format(uint32_t format)
case DRM_FORMAT_XRGB8888:
return &packed_4bpp_layout;
+ case DRM_FORMAT_ABGR16161616F:
+ return &packed_8bpp_layout;
+
default:
drv_log("UNKNOWN FORMAT %d\n", format);
return NULL;
@@ -166,6 +176,20 @@ size_t drv_num_planes_from_format(uint32_t format)
return layout ? layout->num_planes : 0;
}
+size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier)
+{
+ size_t planes = drv_num_planes_from_format(format);
+
+ /* Disallow unsupported formats. */
+ if (!planes)
+ return 0;
+
+ if (drv->backend->num_planes_from_modifier && modifier != DRM_FORMAT_MOD_INVALID)
+ return drv->backend->num_planes_from_modifier(drv, format, modifier);
+
+ return planes;
+}
+
uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
{
const struct planar_layout *layout = layout_from_format(format);
@@ -175,6 +199,15 @@ uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
return DIV_ROUND_UP(height, layout->vertical_subsampling[plane]);
}
+uint32_t drv_vertical_subsampling_from_format(uint32_t format, size_t plane)
+{
+ const struct planar_layout *layout = layout_from_format(format);
+
+ assert(plane < layout->num_planes);
+
+ return layout->vertical_subsampling[plane];
+}
+
uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane)
{
const struct planar_layout *layout = layout_from_format(format);
@@ -231,7 +264,13 @@ static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
*/
int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format)
{
+ uint32_t padding[DRV_MAX_PLANES] = { 0 };
+ return drv_bo_from_format_and_padding(bo, stride, aligned_height, format, padding);
+}
+int drv_bo_from_format_and_padding(struct bo *bo, uint32_t stride, uint32_t aligned_height,
+ uint32_t format, uint32_t padding[DRV_MAX_PLANES])
+{
size_t p, num_planes;
uint32_t offset = 0;
@@ -245,23 +284,25 @@ int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height,
* is 32 bytes aligned.
*/
if (format == DRM_FORMAT_YVU420_ANDROID) {
- assert(aligned_height == bo->height);
+ assert(aligned_height == bo->meta.height);
assert(stride == ALIGN(stride, 32));
}
for (p = 0; p < num_planes; p++) {
- bo->strides[p] = subsample_stride(stride, format, p);
- bo->sizes[p] = drv_size_from_format(format, bo->strides[p], aligned_height, p);
- bo->offsets[p] = offset;
- offset += bo->sizes[p];
+ bo->meta.strides[p] = subsample_stride(stride, format, p);
+ bo->meta.sizes[p] =
+ drv_size_from_format(format, bo->meta.strides[p], aligned_height, p) +
+ padding[p];
+ bo->meta.offsets[p] = offset;
+ offset += bo->meta.sizes[p];
}
- bo->total_size = offset;
+ bo->meta.total_size = offset;
return 0;
}
-int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags, uint64_t quirks)
{
int ret;
size_t plane;
@@ -279,7 +320,7 @@ int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t
*
* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not
* be aligned. */
- aligned_height = 3 * DIV_ROUND_UP(bo->height, 2);
+ aligned_height = 3 * DIV_ROUND_UP(bo->meta.height, 2);
break;
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
@@ -291,9 +332,15 @@ int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t
}
memset(&create_dumb, 0, sizeof(create_dumb));
- create_dumb.height = aligned_height;
+ if (quirks & BO_QUIRK_DUMB32BPP) {
+ aligned_width =
+ DIV_ROUND_UP(aligned_width * layout_from_format(format)->bytes_per_pixel[0], 4);
+ create_dumb.bpp = 32;
+ } else {
+ create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8;
+ }
create_dumb.width = aligned_width;
- create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8;
+ create_dumb.height = aligned_height;
create_dumb.flags = 0;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
@@ -304,13 +351,19 @@ int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t
drv_bo_from_format(bo, create_dumb.pitch, height, format);
- for (plane = 0; plane < bo->num_planes; plane++)
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
bo->handles[plane].u32 = create_dumb.handle;
- bo->total_size = create_dumb.size;
+ bo->meta.total_size = create_dumb.size;
return 0;
}
+int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_NONE);
+}
+
int drv_dumb_bo_destroy(struct bo *bo)
{
struct drm_mode_destroy_dumb destroy_dumb;
@@ -334,7 +387,7 @@ int drv_gem_bo_destroy(struct bo *bo)
int ret, error = 0;
size_t plane, i;
- for (plane = 0; plane < bo->num_planes; plane++) {
+ for (plane = 0; plane < bo->meta.num_planes; plane++) {
for (i = 0; i < plane; i++)
if (bo->handles[i].u32 == bo->handles[plane].u32)
break;
@@ -362,7 +415,7 @@ int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
size_t plane;
struct drm_prime_handle prime_handle;
- for (plane = 0; plane < bo->num_planes; plane++) {
+ for (plane = 0; plane < bo->meta.num_planes; plane++) {
memset(&prime_handle, 0, sizeof(prime_handle));
prime_handle.fd = data->fds[plane];
@@ -377,7 +430,7 @@ int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
* plane that failed, so GEM close will be called on
* planes before that plane.
*/
- bo->num_planes = plane;
+ bo->meta.num_planes = plane;
drv_gem_bo_destroy(bo);
return -errno;
}
@@ -403,9 +456,9 @@ void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map
return MAP_FAILED;
}
- for (i = 0; i < bo->num_planes; i++)
+ for (i = 0; i < bo->meta.num_planes; i++)
if (bo->handles[i].u32 == bo->handles[plane].u32)
- vma->length += bo->sizes[i];
+ vma->length += bo->meta.sizes[i];
return mmap(0, vma->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
map_dumb.offset);
@@ -429,7 +482,7 @@ int drv_mapping_destroy(struct bo *bo)
*/
idx = 0;
- for (plane = 0; plane < bo->num_planes; plane++) {
+ for (plane = 0; plane < bo->meta.num_planes; plane++) {
while (idx < drv_array_size(bo->drv->mappings)) {
mapping = (struct mapping *)drv_array_at_idx(bo->drv->mappings, idx);
if (mapping->vma->handle != bo->handles[plane].u32) {
@@ -528,137 +581,16 @@ void drv_modify_combination(struct driver *drv, uint32_t format, struct format_m
}
}
-struct drv_array *drv_query_kms(struct driver *drv)
-{
- struct drv_array *kms_items;
- uint64_t plane_type = UINT64_MAX;
- uint64_t use_flag;
- uint32_t i, j, k;
-
- drmModePlanePtr plane;
- drmModePropertyPtr prop;
- drmModePlaneResPtr resources;
- drmModeObjectPropertiesPtr props;
-
- kms_items = drv_array_init(sizeof(struct kms_item));
- if (!kms_items)
- goto out;
-
- /*
- * The ability to return universal planes is only complete on
- * ChromeOS kernel versions >= v3.18. The SET_CLIENT_CAP ioctl
- * therefore might return an error code, so don't check it. If it
- * fails, it'll just return the plane list as overlay planes, which is
- * fine in our case (our drivers already have cursor bits set).
- * modetest in libdrm does the same thing.
- */
- drmSetClientCap(drv->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
-
- resources = drmModeGetPlaneResources(drv->fd);
- if (!resources)
- goto out;
-
- for (i = 0; i < resources->count_planes; i++) {
- plane_type = UINT64_MAX;
- plane = drmModeGetPlane(drv->fd, resources->planes[i]);
- if (!plane)
- goto out;
-
- props = drmModeObjectGetProperties(drv->fd, plane->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!props)
- goto out;
-
- for (j = 0; j < props->count_props; j++) {
- prop = drmModeGetProperty(drv->fd, props->props[j]);
- if (prop) {
- if (strcmp(prop->name, "type") == 0) {
- plane_type = props->prop_values[j];
- }
-
- drmModeFreeProperty(prop);
- }
- }
-
- switch (plane_type) {
- case DRM_PLANE_TYPE_OVERLAY:
- case DRM_PLANE_TYPE_PRIMARY:
- use_flag = BO_USE_SCANOUT;
- break;
- case DRM_PLANE_TYPE_CURSOR:
- use_flag = BO_USE_CURSOR;
- break;
- default:
- assert(0);
- }
-
- for (j = 0; j < plane->count_formats; j++) {
- bool found = false;
- for (k = 0; k < drv_array_size(kms_items); k++) {
- struct kms_item *item = drv_array_at_idx(kms_items, k);
- if (item->format == plane->formats[j] &&
- item->modifier == DRM_FORMAT_MOD_LINEAR) {
- item->use_flags |= use_flag;
- found = true;
- break;
- }
- }
-
- if (!found) {
- struct kms_item item = { .format = plane->formats[j],
- .modifier = DRM_FORMAT_MOD_LINEAR,
- .use_flags = use_flag };
-
- drv_array_append(kms_items, &item);
- }
- }
-
- drmModeFreeObjectProperties(props);
- drmModeFreePlane(plane);
- }
-
- drmModeFreePlaneResources(resources);
-out:
- if (kms_items && !drv_array_size(kms_items)) {
- drv_array_destroy(kms_items);
- return NULL;
- }
-
- return kms_items;
-}
-
int drv_modify_linear_combinations(struct driver *drv)
{
- uint32_t i, j;
- struct kms_item *item;
- struct combination *combo;
- struct drv_array *kms_items;
-
/*
* All current drivers can scanout linear XRGB8888/ARGB8888 as a primary
- * plane and as a cursor. Some drivers don't support
- * drmModeGetPlaneResources, so add the combination here. Note that the
- * kernel disregards the alpha component of ARGB unless it's an overlay
- * plane.
+ * plane and as a cursor.
*/
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
BO_USE_CURSOR | BO_USE_SCANOUT);
-
- kms_items = drv_query_kms(drv);
- if (!kms_items)
- return 0;
-
- for (i = 0; i < drv_array_size(kms_items); i++) {
- item = (struct kms_item *)drv_array_at_idx(kms_items, i);
- for (j = 0; j < drv_array_size(drv->combos); j++) {
- combo = drv_array_at_idx(drv->combos, j);
- if (item->format == combo->format)
- combo->use_flags |= BO_USE_SCANOUT;
- }
- }
-
- drv_array_destroy(kms_items);
return 0;
}
diff --git a/chromium/third_party/minigbm/src/helpers.h b/chromium/third_party/minigbm/src/helpers.h
index c09d2c24a62..19d0fd7476a 100644
--- a/chromium/third_party/minigbm/src/helpers.h
+++ b/chromium/third_party/minigbm/src/helpers.h
@@ -13,10 +13,15 @@
#include "helpers_array.h"
uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane);
+uint32_t drv_vertical_subsampling_from_format(uint32_t format, size_t plane);
uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane);
int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format);
+int drv_bo_from_format_and_padding(struct bo *bo, uint32_t stride, uint32_t aligned_height,
+ uint32_t format, uint32_t padding[DRV_MAX_PLANES]);
int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags);
+int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags, uint64_t quirks);
int drv_dumb_bo_destroy(struct bo *bo);
int drv_gem_bo_destroy(struct bo *bo);
int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data);
@@ -33,7 +38,6 @@ void drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t
struct format_metadata *metadata, uint64_t usage);
void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
uint64_t usage);
-struct drv_array *drv_query_kms(struct driver *drv);
int drv_modify_linear_combinations(struct driver *drv);
uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count,
const uint64_t *modifier_order, uint32_t order_count);
diff --git a/chromium/third_party/minigbm/src/i915.c b/chromium/third_party/minigbm/src/i915.c
index 9683bf0a2d0..3ed785a0bd0 100644
--- a/chromium/third_party/minigbm/src/i915.c
+++ b/chromium/third_party/minigbm/src/i915.c
@@ -23,17 +23,16 @@
#define I915_CACHELINE_SIZE 64
#define I915_CACHELINE_MASK (I915_CACHELINE_SIZE - 1)
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
- DRM_FORMAT_XBGR2101010, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB1555, DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB2101010 };
+static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB2101010, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XRGB8888 };
-static const uint32_t tileable_texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
- DRM_FORMAT_UYVY, DRM_FORMAT_YUYV };
+static const uint32_t render_formats[] = { DRM_FORMAT_ABGR16161616F };
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID,
- DRM_FORMAT_NV12, DRM_FORMAT_P010 };
+static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010,
+ DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
struct i915_device {
uint32_t gen;
@@ -52,108 +51,49 @@ static uint32_t i915_get_gen(int device_id)
return 4;
}
-/*
- * We allow allocation of ARGB formats for SCANOUT if the corresponding XRGB
- * formats supports it. It's up to the caller (chrome ozone) to ultimately not
- * scan out ARGB if the display controller only supports XRGB, but we'll allow
- * the allocation of the bo here.
- */
-static bool format_compatible(const struct combination *combo, uint32_t format)
+static uint64_t unset_flags(uint64_t current_flags, uint64_t mask)
{
- if (combo->format == format)
- return true;
-
- switch (format) {
- case DRM_FORMAT_XRGB8888:
- return combo->format == DRM_FORMAT_ARGB8888;
- case DRM_FORMAT_XBGR8888:
- return combo->format == DRM_FORMAT_ABGR8888;
- case DRM_FORMAT_RGBX8888:
- return combo->format == DRM_FORMAT_RGBA8888;
- case DRM_FORMAT_BGRX8888:
- return combo->format == DRM_FORMAT_BGRA8888;
- default:
- return false;
- }
-}
-
-static int i915_add_kms_item(struct driver *drv, const struct kms_item *item)
-{
- uint32_t i;
- struct combination *combo;
-
- /*
- * Older hardware can't scanout Y-tiled formats. Newer devices can, and
- * report this functionality via format modifiers.
- */
- for (i = 0; i < drv_array_size(drv->combos); i++) {
- combo = (struct combination *)drv_array_at_idx(drv->combos, i);
- if (!format_compatible(combo, item->format))
- continue;
-
- if (item->modifier == DRM_FORMAT_MOD_LINEAR &&
- combo->metadata.tiling == I915_TILING_X) {
- /*
- * FIXME: drv_query_kms() does not report the available modifiers
- * yet, but we know that all hardware can scanout from X-tiled
- * buffers, so let's add this to our combinations, except for
- * cursor, which must not be tiled.
- */
- combo->use_flags |= item->use_flags & ~BO_USE_CURSOR;
- }
-
- /* If we can scanout NV12, we support all tiling modes. */
- if (item->format == DRM_FORMAT_NV12)
- combo->use_flags |= item->use_flags;
-
- if (combo->metadata.modifier == item->modifier)
- combo->use_flags |= item->use_flags;
- }
-
- return 0;
+ uint64_t value = current_flags & ~mask;
+ return value;
}
static int i915_add_combinations(struct driver *drv)
{
- int ret;
- uint32_t i;
- struct drv_array *kms_items;
struct format_metadata metadata;
- uint64_t render_use_flags, texture_use_flags;
+ uint64_t render, scanout_and_render, texture_only;
- render_use_flags = BO_USE_RENDER_MASK;
- texture_use_flags = BO_USE_TEXTURE_MASK;
+ scanout_and_render = BO_USE_RENDER_MASK | BO_USE_SCANOUT;
+ render = BO_USE_RENDER_MASK;
+ texture_only = BO_USE_TEXTURE_MASK;
+ uint64_t linear_mask = BO_USE_RENDERSCRIPT | BO_USE_LINEAR | BO_USE_PROTECTED |
+ BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN;
metadata.tiling = I915_TILING_NONE;
metadata.priority = 1;
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, render_use_flags);
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &metadata, scanout_and_render);
- drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &metadata, texture_use_flags);
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
- drv_add_combinations(drv, tileable_texture_source_formats,
- ARRAY_SIZE(tileable_texture_source_formats), &metadata,
- texture_use_flags);
+ drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
+ texture_only);
+ drv_modify_linear_combinations(drv);
/*
* Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
* Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
*/
drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_ENCODER);
+ /* IPU3 camera ISP supports only NV12 output. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
- drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
-
- /* IPU3 camera ISP supports only NV12 output. */
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
* from camera.
@@ -161,59 +101,38 @@ static int i915_add_combinations(struct driver *drv)
drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
- render_use_flags &= ~BO_USE_RENDERSCRIPT;
- render_use_flags &= ~BO_USE_SW_WRITE_OFTEN;
- render_use_flags &= ~BO_USE_SW_READ_OFTEN;
- render_use_flags &= ~BO_USE_LINEAR;
- render_use_flags &= ~BO_USE_PROTECTED;
-
- texture_use_flags &= ~BO_USE_RENDERSCRIPT;
- texture_use_flags &= ~BO_USE_SW_WRITE_OFTEN;
- texture_use_flags &= ~BO_USE_SW_READ_OFTEN;
- texture_use_flags &= ~BO_USE_LINEAR;
- texture_use_flags &= ~BO_USE_PROTECTED;
+ render = unset_flags(render, linear_mask);
+ scanout_and_render = unset_flags(scanout_and_render, linear_mask);
metadata.tiling = I915_TILING_X;
metadata.priority = 2;
metadata.modifier = I915_FORMAT_MOD_X_TILED;
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, render_use_flags);
-
- drv_add_combinations(drv, tileable_texture_source_formats,
- ARRAY_SIZE(tileable_texture_source_formats), &metadata,
- texture_use_flags);
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &metadata, scanout_and_render);
metadata.tiling = I915_TILING_Y;
metadata.priority = 3;
metadata.modifier = I915_FORMAT_MOD_Y_TILED;
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, render_use_flags);
-
- drv_add_combinations(drv, tileable_texture_source_formats,
- ARRAY_SIZE(tileable_texture_source_formats), &metadata,
- texture_use_flags);
-
- /* Support y-tiled NV12 and P010 for libva */
+ scanout_and_render =
+ unset_flags(scanout_and_render, BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY);
+/* Support y-tiled NV12 and P010 for libva */
+#ifdef I915_SCANOUT_Y_TILED
+ drv_add_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT);
+#else
drv_add_combination(drv, DRM_FORMAT_NV12, &metadata,
BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER);
+#endif
+ scanout_and_render = unset_flags(scanout_and_render, BO_USE_SCANOUT);
drv_add_combination(drv, DRM_FORMAT_P010, &metadata,
BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER);
- kms_items = drv_query_kms(drv);
- if (!kms_items)
- return 0;
-
- for (i = 0; i < drv_array_size(kms_items); i++) {
- ret = i915_add_kms_item(drv, (struct kms_item *)drv_array_at_idx(kms_items, i));
- if (ret) {
- drv_array_destroy(kms_items);
- return ret;
- }
- }
-
- drv_array_destroy(kms_items);
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &metadata, scanout_and_render);
return 0;
}
@@ -254,7 +173,7 @@ static int i915_align_dimensions(struct bo *bo, uint32_t tiling, uint32_t *strid
break;
}
- *aligned_height = ALIGN(bo->height, vertical_alignment);
+ *aligned_height = ALIGN(*aligned_height, vertical_alignment);
if (i915->gen > 3) {
*stride = ALIGN(*stride, horizontal_alignment);
} else {
@@ -332,45 +251,59 @@ static int i915_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, u
uint32_t stride = drv_stride_from_format(format, width, plane);
uint32_t plane_height = drv_height_from_format(format, height, plane);
- if (bo->tiling != I915_TILING_NONE)
+ if (bo->meta.tiling != I915_TILING_NONE)
assert(IS_ALIGNED(offset, pagesize));
- ret = i915_align_dimensions(bo, bo->tiling, &stride, &plane_height);
+ ret = i915_align_dimensions(bo, bo->meta.tiling, &stride, &plane_height);
if (ret)
return ret;
- bo->strides[plane] = stride;
- bo->sizes[plane] = stride * plane_height;
- bo->offsets[plane] = offset;
- offset += bo->sizes[plane];
+ bo->meta.strides[plane] = stride;
+ bo->meta.sizes[plane] = stride * plane_height;
+ bo->meta.offsets[plane] = offset;
+ offset += bo->meta.sizes[plane];
}
- bo->total_size = ALIGN(offset, pagesize);
+ bo->meta.total_size = ALIGN(offset, pagesize);
return 0;
}
-static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height,
- uint32_t format, uint64_t modifier)
+static int i915_bo_compute_metadata(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags, const uint64_t *modifiers, uint32_t count)
{
- int ret;
- size_t plane;
- struct drm_i915_gem_create gem_create;
- struct drm_i915_gem_set_tiling gem_set_tiling;
+ static const uint64_t modifier_order[] = {
+ I915_FORMAT_MOD_Y_TILED_CCS,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ };
+ uint64_t modifier;
+
+ if (modifiers) {
+ modifier =
+ drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order));
+ } else {
+ struct combination *combo = drv_get_combination(bo->drv, format, use_flags);
+ if (!combo)
+ return -EINVAL;
+ modifier = combo->metadata.modifier;
+ }
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
- bo->tiling = I915_TILING_NONE;
+ bo->meta.tiling = I915_TILING_NONE;
break;
case I915_FORMAT_MOD_X_TILED:
- bo->tiling = I915_TILING_X;
+ bo->meta.tiling = I915_TILING_X;
break;
case I915_FORMAT_MOD_Y_TILED:
- bo->tiling = I915_TILING_Y;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ bo->meta.tiling = I915_TILING_Y;
break;
}
- bo->format_modifiers[0] = modifier;
+ bo->meta.format_modifiers[0] = modifier;
if (format == DRM_FORMAT_YVU420_ANDROID) {
/*
@@ -383,12 +316,62 @@ static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t h
*/
uint32_t stride = ALIGN(width, 32);
drv_bo_from_format(bo, stride, height, format);
+ } else if (modifier == I915_FORMAT_MOD_Y_TILED_CCS) {
+ /*
+ * For compressed surfaces, we need a color control surface
+ * (CCS). Color compression is only supported for Y tiled
+ * surfaces, and for each 32x16 tiles in the main surface we
+ * need a tile in the control surface. Y tiles are 128 bytes
+ * wide and 32 lines tall and we use that to first compute the
+ * width and height in tiles of the main surface. stride and
+ * height are already multiples of 128 and 32, respectively:
+ */
+ uint32_t stride = drv_stride_from_format(format, width, 0);
+ uint32_t width_in_tiles = DIV_ROUND_UP(stride, 128);
+ uint32_t height_in_tiles = DIV_ROUND_UP(height, 32);
+ uint32_t size = width_in_tiles * height_in_tiles * 4096;
+ uint32_t offset = 0;
+
+ bo->meta.strides[0] = width_in_tiles * 128;
+ bo->meta.sizes[0] = size;
+ bo->meta.offsets[0] = offset;
+ offset += size;
+
+ /*
+ * Now, compute the width and height in tiles of the control
+ * surface by dividing and rounding up.
+ */
+ uint32_t ccs_width_in_tiles = DIV_ROUND_UP(width_in_tiles, 32);
+ uint32_t ccs_height_in_tiles = DIV_ROUND_UP(height_in_tiles, 16);
+ uint32_t ccs_size = ccs_width_in_tiles * ccs_height_in_tiles * 4096;
+
+ /*
+ * With stride and height aligned to y tiles, offset is
+ * already a multiple of 4096, which is the required alignment
+ * of the CCS.
+ */
+ bo->meta.strides[1] = ccs_width_in_tiles * 128;
+ bo->meta.sizes[1] = ccs_size;
+ bo->meta.offsets[1] = offset;
+ offset += ccs_size;
+
+ bo->meta.num_planes = 2;
+ bo->meta.total_size = offset;
} else {
i915_bo_from_format(bo, width, height, format);
}
+ return 0;
+}
+
+static int i915_bo_create_from_metadata(struct bo *bo)
+{
+ int ret;
+ size_t plane;
+ struct drm_i915_gem_create gem_create;
+ struct drm_i915_gem_set_tiling gem_set_tiling;
memset(&gem_create, 0, sizeof(gem_create));
- gem_create.size = bo->total_size;
+ gem_create.size = bo->meta.total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
if (ret) {
@@ -396,13 +379,13 @@ static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t h
return -errno;
}
- for (plane = 0; plane < bo->num_planes; plane++)
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
bo->handles[plane].u32 = gem_create.handle;
memset(&gem_set_tiling, 0, sizeof(gem_set_tiling));
gem_set_tiling.handle = bo->handles[0].u32;
- gem_set_tiling.tiling_mode = bo->tiling;
- gem_set_tiling.stride = bo->strides[0];
+ gem_set_tiling.tiling_mode = bo->meta.tiling;
+ gem_set_tiling.stride = bo->meta.strides[0];
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_TILING, &gem_set_tiling);
if (ret) {
@@ -418,33 +401,6 @@ static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t h
return 0;
}
-static int i915_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
-{
- struct combination *combo;
-
- combo = drv_get_combination(bo->drv, format, use_flags);
- if (!combo)
- return -EINVAL;
-
- return i915_bo_create_for_modifier(bo, width, height, format, combo->metadata.modifier);
-}
-
-static int i915_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
- uint32_t format, const uint64_t *modifiers, uint32_t count)
-{
- static const uint64_t modifier_order[] = {
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- };
- uint64_t modifier;
-
- modifier = drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order));
-
- return i915_bo_create_for_modifier(bo, width, height, format, modifier);
-}
-
static void i915_close(struct driver *drv)
{
free(drv->priv);
@@ -471,7 +427,7 @@ static int i915_bo_import(struct bo *bo, struct drv_import_fd_data *data)
return ret;
}
- bo->tiling = gem_get_tiling.tiling_mode;
+ bo->meta.tiling = gem_get_tiling.tiling_mode;
return 0;
}
@@ -480,7 +436,10 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
int ret;
void *addr;
- if (bo->tiling == I915_TILING_NONE) {
+ if (bo->meta.format_modifiers[0] == I915_FORMAT_MOD_Y_TILED_CCS)
+ return MAP_FAILED;
+
+ if (bo->meta.tiling == I915_TILING_NONE) {
struct drm_i915_gem_mmap gem_map;
memset(&gem_map, 0, sizeof(gem_map));
@@ -492,14 +451,14 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
* For now, care must be taken not to use WC mappings for
* Renderscript and camera use cases, as they're
* performance-sensitive. */
- if ((bo->use_flags & BO_USE_SCANOUT) &&
- !(bo->use_flags &
+ if ((bo->meta.use_flags & BO_USE_SCANOUT) &&
+ !(bo->meta.use_flags &
(BO_USE_RENDERSCRIPT | BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)))
gem_map.flags = I915_MMAP_WC;
gem_map.handle = bo->handles[0].u32;
gem_map.offset = 0;
- gem_map.size = bo->total_size;
+ gem_map.size = bo->meta.total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_map);
if (ret) {
@@ -520,8 +479,8 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
return MAP_FAILED;
}
- addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
- gem_map.offset);
+ addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED,
+ bo->drv->fd, gem_map.offset);
}
if (addr == MAP_FAILED) {
@@ -529,7 +488,7 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
return addr;
}
- vma->length = bo->total_size;
+ vma->length = bo->meta.total_size;
return addr;
}
@@ -540,7 +499,7 @@ static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping)
memset(&set_domain, 0, sizeof(set_domain));
set_domain.handle = bo->handles[0].u32;
- if (bo->tiling == I915_TILING_NONE) {
+ if (bo->meta.tiling == I915_TILING_NONE) {
set_domain.read_domains = I915_GEM_DOMAIN_CPU;
if (mapping->vma->map_flags & BO_MAP_WRITE)
set_domain.write_domain = I915_GEM_DOMAIN_CPU;
@@ -562,7 +521,7 @@ static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping)
static int i915_bo_flush(struct bo *bo, struct mapping *mapping)
{
struct i915_device *i915 = bo->drv->priv;
- if (!i915->has_llc && bo->tiling == I915_TILING_NONE)
+ if (!i915->has_llc && bo->meta.tiling == I915_TILING_NONE)
i915_clflush(mapping->vma->addr, mapping->vma->length);
return 0;
@@ -597,8 +556,8 @@ const struct backend backend_i915 = {
.name = "i915",
.init = i915_init,
.close = i915_close,
- .bo_create = i915_bo_create,
- .bo_create_with_modifiers = i915_bo_create_with_modifiers,
+ .bo_compute_metadata = i915_bo_compute_metadata,
+ .bo_create_from_metadata = i915_bo_create_from_metadata,
.bo_destroy = drv_gem_bo_destroy,
.bo_import = i915_bo_import,
.bo_map = i915_bo_map,
diff --git a/chromium/third_party/minigbm/src/mediatek.c b/chromium/third_party/minigbm/src/mediatek.c
index 199d3cfb741..cdfc9ab73ae 100644
--- a/chromium/third_party/minigbm/src/mediatek.c
+++ b/chromium/third_party/minigbm/src/mediatek.c
@@ -9,6 +9,7 @@
// clang-format off
#include <errno.h>
#include <fcntl.h>
+#include <inttypes.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
@@ -35,12 +36,12 @@ static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMA
DRM_FORMAT_XRGB8888 };
#ifdef MTK_MT8183
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV21,
- DRM_FORMAT_NV12, DRM_FORMAT_YUYV,
- DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV21, DRM_FORMAT_NV12,
+ DRM_FORMAT_YUYV, DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU420_ANDROID };
#else
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
- DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_NV12 };
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID,
+ DRM_FORMAT_NV12 };
#endif
static int mediatek_init(struct driver *drv)
@@ -48,11 +49,13 @@ static int mediatek_init(struct driver *drv)
struct format_metadata metadata;
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
+ &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
&LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+ drv_add_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
+ BO_USE_SW_MASK | BO_USE_LINEAR | BO_USE_PROTECTED);
/*
* Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
* Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
@@ -83,6 +86,9 @@ static int mediatek_init(struct driver *drv)
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ /* Private formats for private reprocessing in camera */
+ drv_add_combination(drv, DRM_FORMAT_MTISP_SXYZW10, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SW_MASK);
#endif
return drv_modify_linear_combinations(drv);
@@ -109,18 +115,41 @@ static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
*/
stride = drv_stride_from_format(format, width, 0);
stride = ALIGN(stride, 64);
- drv_bo_from_format(bo, stride, height, format);
+
+ if (bo->meta.use_flags & BO_USE_HW_VIDEO_ENCODER) {
+ uint32_t aligned_height = ALIGN(height, 32);
+ uint32_t padding[DRV_MAX_PLANES] = { 0 };
+
+ for (plane = 0; plane < bo->meta.num_planes; ++plane) {
+ uint32_t plane_stride = drv_stride_from_format(format, stride, plane);
+ padding[plane] = plane_stride *
+ (32 / drv_vertical_subsampling_from_format(format, plane));
+ }
+
+ drv_bo_from_format_and_padding(bo, stride, aligned_height, format, padding);
+ } else {
+#ifdef MTK_MT8183
+ /*
+ * JPEG Encoder Accelerator requires 16x16 alignment. We want the buffer
+ * from camera can be put in JEA directly so align the height to 16
+ * bytes.
+ */
+ if (format == DRM_FORMAT_NV12)
+ height = ALIGN(height, 16);
+#endif
+ drv_bo_from_format(bo, stride, height, format);
+ }
memset(&gem_create, 0, sizeof(gem_create));
- gem_create.size = bo->total_size;
+ gem_create.size = bo->meta.total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_CREATE, &gem_create);
if (ret) {
- drv_log("DRM_IOCTL_MTK_GEM_CREATE failed (size=%llu)\n", gem_create.size);
+ drv_log("DRM_IOCTL_MTK_GEM_CREATE failed (size=%" PRIu64 ")\n", gem_create.size);
return -errno;
}
- for (plane = 0; plane < bo->num_planes; plane++)
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
bo->handles[plane].u32 = gem_create.handle;
return 0;
@@ -149,23 +178,23 @@ static void *mediatek_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint3
return MAP_FAILED;
}
- ret = drmPrimeHandleToFD(bo->drv->fd, gem_map.handle, DRM_CLOEXEC, &prime_fd);
- if (ret) {
+ prime_fd = drv_bo_get_plane_fd(bo, 0);
+ if (prime_fd < 0) {
drv_log("Failed to get a prime fd\n");
return MAP_FAILED;
}
- void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ void *addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
- vma->length = bo->total_size;
+ vma->length = bo->meta.total_size;
priv = calloc(1, sizeof(*priv));
priv->prime_fd = prime_fd;
vma->priv = priv;
- if (bo->use_flags & BO_USE_RENDERSCRIPT) {
- priv->cached_addr = calloc(1, bo->total_size);
+ if (bo->meta.use_flags & BO_USE_RENDERSCRIPT) {
+ priv->cached_addr = calloc(1, bo->meta.total_size);
priv->gem_addr = addr;
addr = priv->cached_addr;
}
@@ -211,7 +240,7 @@ static int mediatek_bo_invalidate(struct bo *bo, struct mapping *mapping)
drv_log("poll prime_fd failed\n");
if (priv->cached_addr)
- memcpy(priv->cached_addr, priv->gem_addr, bo->total_size);
+ memcpy(priv->cached_addr, priv->gem_addr, bo->meta.total_size);
}
return 0;
@@ -221,7 +250,7 @@ static int mediatek_bo_flush(struct bo *bo, struct mapping *mapping)
{
struct mediatek_private_map_data *priv = mapping->vma->priv;
if (priv && priv->cached_addr && (mapping->vma->map_flags & BO_MAP_WRITE))
- memcpy(priv->gem_addr, priv->cached_addr, bo->total_size);
+ memcpy(priv->gem_addr, priv->cached_addr, bo->meta.total_size);
return 0;
}
@@ -231,17 +260,25 @@ static uint32_t mediatek_resolve_format(struct driver *drv, uint32_t format, uin
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
#ifdef MTK_MT8183
- /* Only for MT8183 Camera subsystem requires NV12. */
- if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
+ /* Only MT8183 Camera subsystem offers private reprocessing
+ * capability. CAMERA_READ indicates the buffer is intended for
+ * reprocessing and hence given the private format for MTK. */
+ if (use_flags & BO_USE_CAMERA_READ)
+ return DRM_FORMAT_MTISP_SXYZW10;
+ /* For non-reprocessing uses, only MT8183 Camera subsystem
+ * requires NV12. */
+ else if (use_flags & BO_USE_CAMERA_WRITE)
return DRM_FORMAT_NV12;
#endif
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
#ifdef MTK_MT8183
- /* Only for MT8183 Camera subsystem requires NV12 */
- if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
+ /* MT8183 camera and decoder subsystems require NV12. */
+ if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER)) {
return DRM_FORMAT_NV12;
+ }
#endif
return DRM_FORMAT_YVU420;
default:
diff --git a/chromium/third_party/minigbm/src/meson.c b/chromium/third_party/minigbm/src/meson.c
index 523bf710ecd..f82c57a685a 100644
--- a/chromium/third_party/minigbm/src/meson.c
+++ b/chromium/third_party/minigbm/src/meson.c
@@ -10,12 +10,14 @@
#include "helpers.h"
#include "util.h"
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_BGR888, DRM_FORMAT_BGR565};
static int meson_init(struct driver *drv)
{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
return drv_modify_linear_combinations(drv);
}
diff --git a/chromium/third_party/minigbm/src/msm.c b/chromium/third_party/minigbm/src/msm.c
index a8df00000ce..fac1fd06d7d 100644
--- a/chromium/third_party/minigbm/src/msm.c
+++ b/chromium/third_party/minigbm/src/msm.c
@@ -69,13 +69,13 @@ static void msm_calculate_layout(struct bo *bo)
{
uint32_t width, height;
- width = bo->width;
- height = bo->height;
+ width = bo->meta.width;
+ height = bo->meta.height;
/* NV12 format requires extra padding with platform
* specific alignments for venus driver
*/
- if (bo->format == DRM_FORMAT_NV12) {
+ if (bo->meta.format == DRM_FORMAT_NV12) {
uint32_t y_stride, uv_stride, y_scanline, uv_scanline, y_plane, uv_plane, size,
extra_padding;
@@ -86,7 +86,7 @@ static void msm_calculate_layout(struct bo *bo)
y_plane = y_stride * y_scanline;
uv_plane = uv_stride * uv_scanline;
- if (bo->tiling == MSM_UBWC_TILING) {
+ if (bo->meta.tiling == MSM_UBWC_TILING) {
y_plane += get_ubwc_meta_size(width, height, 32, 8);
uv_plane += get_ubwc_meta_size(width >> 1, height >> 1, 16, 8);
extra_padding = NV12_UBWC_PADDING(y_stride);
@@ -94,34 +94,37 @@ static void msm_calculate_layout(struct bo *bo)
extra_padding = NV12_LINEAR_PADDING;
}
- bo->strides[0] = y_stride;
- bo->sizes[0] = y_plane;
- bo->offsets[1] = y_plane;
- bo->strides[1] = uv_stride;
+ bo->meta.strides[0] = y_stride;
+ bo->meta.sizes[0] = y_plane;
+ bo->meta.offsets[1] = y_plane;
+ bo->meta.strides[1] = uv_stride;
size = y_plane + uv_plane + extra_padding;
- bo->total_size = ALIGN(size, BUFFER_SIZE_ALIGN);
- bo->sizes[1] = bo->total_size - bo->sizes[0];
+ bo->meta.total_size = ALIGN(size, BUFFER_SIZE_ALIGN);
+ bo->meta.sizes[1] = bo->meta.total_size - bo->meta.sizes[0];
} else {
uint32_t stride, alignw, alignh;
alignw = ALIGN(width, DEFAULT_ALIGNMENT);
- /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned. */
- if (bo->format == DRM_FORMAT_YVU420_ANDROID) {
+ /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned.
+ DRM_FORMAT_R8 of height one is used for JPEG camera output, so don't
+ height align that. */
+ if (bo->meta.format == DRM_FORMAT_YVU420_ANDROID ||
+ (bo->meta.format == DRM_FORMAT_R8 && height == 1)) {
alignh = height;
} else {
alignh = ALIGN(height, DEFAULT_ALIGNMENT);
}
- stride = drv_stride_from_format(bo->format, alignw, 0);
+ stride = drv_stride_from_format(bo->meta.format, alignw, 0);
/* Calculate size and assign stride, size, offset to each plane based on format */
- drv_bo_from_format(bo, stride, alignh, bo->format);
+ drv_bo_from_format(bo, stride, alignh, bo->meta.format);
/* For all RGB UBWC formats */
- if (bo->tiling == MSM_UBWC_TILING) {
- bo->sizes[0] += get_ubwc_meta_size(width, height, 16, 4);
- bo->total_size = bo->sizes[0];
- assert(IS_ALIGNED(bo->total_size, BUFFER_SIZE_ALIGN));
+ if (bo->meta.tiling == MSM_UBWC_TILING) {
+ bo->meta.sizes[0] += get_ubwc_meta_size(width, height, 16, 4);
+ bo->meta.total_size = bo->meta.sizes[0];
+ assert(IS_ALIGNED(bo->meta.total_size, BUFFER_SIZE_ALIGN));
}
}
}
@@ -131,6 +134,8 @@ static bool is_ubwc_fmt(uint32_t format)
switch (format) {
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_NV12:
return 1;
default:
@@ -155,7 +160,7 @@ static void msm_add_ubwc_combinations(struct driver *drv, const uint32_t *format
static int msm_init(struct driver *drv)
{
struct format_metadata metadata;
- uint64_t render_use_flags = BO_USE_RENDER_MASK;
+ uint64_t render_use_flags = BO_USE_RENDER_MASK | BO_USE_SCANOUT;
uint64_t texture_use_flags = BO_USE_TEXTURE_MASK | BO_USE_HW_VIDEO_DECODER;
uint64_t sw_flags = (BO_USE_RENDERSCRIPT | BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_OFTEN |
BO_USE_LINEAR | BO_USE_PROTECTED);
@@ -173,6 +178,16 @@ static int msm_init(struct driver *drv)
drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
+ /* The camera stack standardizes on NV12 for YUV buffers. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT);
+ /*
+ * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
+ * from camera.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
@@ -201,13 +216,13 @@ static int msm_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t he
int ret;
size_t i;
- bo->tiling = (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) ? MSM_UBWC_TILING : 0;
+ bo->meta.tiling = (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) ? MSM_UBWC_TILING : 0;
msm_calculate_layout(bo);
memset(&req, 0, sizeof(req));
req.flags = MSM_BO_WC | MSM_BO_SCANOUT;
- req.size = bo->total_size;
+ req.size = bo->meta.total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MSM_GEM_NEW, &req);
if (ret) {
@@ -219,9 +234,9 @@ static int msm_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t he
* Though we use only one plane, we need to set handle for
* all planes to pass kernel checks
*/
- for (i = 0; i < bo->num_planes; i++) {
+ for (i = 0; i < bo->meta.num_planes; i++) {
bo->handles[i].u32 = req.handle;
- bo->format_modifiers[i] = modifier;
+ bo->meta.format_modifiers[i] = modifier;
}
return 0;
@@ -268,12 +283,22 @@ static void *msm_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t m
drv_log("DRM_IOCLT_MSM_GEM_INFO failed with %s\n", strerror(errno));
return MAP_FAILED;
}
- vma->length = bo->total_size;
+ vma->length = bo->meta.total_size;
- return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
req.offset);
}
+static uint32_t msm_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
+{
+ switch (format) {
+ case DRM_FORMAT_FLEX_YCbCr_420_888:
+ return DRM_FORMAT_NV12;
+ default:
+ return format;
+ }
+}
+
const struct backend backend_msm = {
.name = "msm",
.init = msm_init,
@@ -283,5 +308,6 @@ const struct backend backend_msm = {
.bo_import = drv_prime_bo_import,
.bo_map = msm_bo_map,
.bo_unmap = drv_bo_munmap,
+ .resolve_format = msm_resolve_format,
};
#endif /* DRV_MSM */
diff --git a/chromium/third_party/minigbm/src/presubmit.sh b/chromium/third_party/minigbm/src/presubmit.sh
index 1cfc59cee0b..5e8a32a256f 100755
--- a/chromium/third_party/minigbm/src/presubmit.sh
+++ b/chromium/third_party/minigbm/src/presubmit.sh
@@ -4,5 +4,6 @@
# found in the LICENSE file.
find \
'(' -name '*.[ch]' -or -name '*.cc' ')' \
+ -not -name 'virtgpu_drm.h' \
-not -name 'gbm.h' -not -name 'virgl_hw.h' \
-exec clang-format -style=file -i {} +
diff --git a/chromium/third_party/minigbm/src/rockchip.c b/chromium/third_party/minigbm/src/rockchip.c
index a1685af924f..25f16abdaa3 100644
--- a/chromium/third_party/minigbm/src/rockchip.c
+++ b/chromium/third_party/minigbm/src/rockchip.c
@@ -7,6 +7,7 @@
#ifdef DRV_ROCKCHIP
#include <errno.h>
+#include <inttypes.h>
#include <rockchip_drm.h>
#include <stdio.h>
#include <string.h>
@@ -22,12 +23,12 @@ struct rockchip_private_map_data {
void *gem_addr;
};
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_BGR888, DRM_FORMAT_RGB565,
- DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 };
+static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGR888, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 };
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12,
- DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+static const uint32_t texture_only_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU420_ANDROID };
static int afbc_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, uint32_t format)
{
@@ -62,98 +63,50 @@ static int afbc_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, u
const uint32_t body_plane_offset = ALIGN(header_plane_size, body_plane_alignment);
const uint32_t total_size = body_plane_offset + body_plane_size;
- bo->strides[0] = width_in_blocks * block_width * pixel_size;
- bo->sizes[0] = total_size;
- bo->offsets[0] = 0;
+ bo->meta.strides[0] = width_in_blocks * block_width * pixel_size;
+ bo->meta.sizes[0] = total_size;
+ bo->meta.offsets[0] = 0;
- bo->total_size = total_size;
+ bo->meta.total_size = total_size;
- bo->format_modifiers[0] = DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC;
-
- return 0;
-}
-
-static int rockchip_add_kms_item(struct driver *drv, const struct kms_item *item)
-{
- uint32_t i, j;
- uint64_t use_flags;
- struct combination *combo;
- struct format_metadata metadata;
-
- for (i = 0; i < drv_array_size(drv->combos); i++) {
- combo = (struct combination *)drv_array_at_idx(drv->combos, i);
- if (combo->format == item->format) {
- if (item->modifier == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC) {
- use_flags = BO_USE_RENDERING | BO_USE_SCANOUT | BO_USE_TEXTURE;
- metadata.modifier = item->modifier;
- metadata.tiling = 0;
- metadata.priority = 2;
-
- for (j = 0; j < ARRAY_SIZE(texture_source_formats); j++) {
- if (item->format == texture_source_formats[j])
- use_flags &= ~BO_USE_RENDERING;
- }
-
- drv_add_combinations(drv, &item->format, 1, &metadata, use_flags);
- } else {
- combo->use_flags |= item->use_flags;
- }
- }
- }
+ bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC;
return 0;
}
static int rockchip_init(struct driver *drv)
{
- int ret;
- uint32_t i;
- struct drv_array *kms_items;
struct format_metadata metadata;
metadata.tiling = 0;
metadata.priority = 1;
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, BO_USE_RENDER_MASK);
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &metadata, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
- drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &metadata, BO_USE_TEXTURE_MASK);
+ drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
+ BO_USE_TEXTURE_MASK);
/*
* Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
* Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
*/
drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_ENCODER);
-
- drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
-
/* Camera ISP supports only NV12 output. */
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER | BO_USE_SCANOUT);
+
+ drv_modify_linear_combinations(drv);
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
* from camera.
*/
- drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
-
- kms_items = drv_query_kms(drv);
- if (!kms_items)
- return 0;
-
- for (i = 0; i < drv_array_size(kms_items); i++) {
- ret = rockchip_add_kms_item(drv, (struct kms_item *)drv_array_at_idx(kms_items, i));
- if (ret) {
- drv_array_destroy(kms_items);
- return ret;
- }
- }
+ drv_add_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SW_MASK |
+ BO_USE_LINEAR | BO_USE_PROTECTED);
- drv_array_destroy(kms_items);
return 0;
}
@@ -177,7 +130,7 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
* drv_bo_from_format updates total_size. Add an extra data space for rockchip video
* driver to store motion vectors.
*/
- bo->total_size += w_mbs * h_mbs * 128;
+ bo->meta.total_size += w_mbs * h_mbs * 128;
} else if (width <= 2560 &&
drv_has_modifier(modifiers, count, DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC)) {
/* If the caller has decided they can use AFBC, always
@@ -207,17 +160,17 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
}
memset(&gem_create, 0, sizeof(gem_create));
- gem_create.size = bo->total_size;
+ gem_create.size = bo->meta.total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &gem_create);
if (ret) {
- drv_log("DRM_IOCTL_ROCKCHIP_GEM_CREATE failed (size=%llu)\n",
- (unsigned long long)gem_create.size);
+ drv_log("DRM_IOCTL_ROCKCHIP_GEM_CREATE failed (size=%" PRIu64 ")\n",
+ gem_create.size);
return -errno;
}
- for (plane = 0; plane < bo->num_planes; plane++)
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
bo->handles[plane].u32 = gem_create.handle;
return 0;
@@ -239,7 +192,7 @@ static void *rockchip_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint3
/* We can only map buffers created with SW access flags, which should
* have no modifiers (ie, not AFBC). */
- if (bo->format_modifiers[0] == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC)
+ if (bo->meta.format_modifiers[0] == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC)
return MAP_FAILED;
memset(&gem_map, 0, sizeof(gem_map));
@@ -251,14 +204,14 @@ static void *rockchip_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint3
return MAP_FAILED;
}
- void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ void *addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
- vma->length = bo->total_size;
+ vma->length = bo->meta.total_size;
- if (bo->use_flags & BO_USE_RENDERSCRIPT) {
+ if (bo->meta.use_flags & BO_USE_RENDERSCRIPT) {
priv = calloc(1, sizeof(*priv));
- priv->cached_addr = calloc(1, bo->total_size);
+ priv->cached_addr = calloc(1, bo->meta.total_size);
priv->gem_addr = addr;
vma->priv = priv;
addr = priv->cached_addr;
@@ -284,7 +237,7 @@ static int rockchip_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
if (mapping->vma->priv) {
struct rockchip_private_map_data *priv = mapping->vma->priv;
- memcpy(priv->cached_addr, priv->gem_addr, bo->total_size);
+ memcpy(priv->cached_addr, priv->gem_addr, bo->meta.total_size);
}
return 0;
@@ -294,7 +247,7 @@ static int rockchip_bo_flush(struct bo *bo, struct mapping *mapping)
{
struct rockchip_private_map_data *priv = mapping->vma->priv;
if (priv && (mapping->vma->map_flags & BO_MAP_WRITE))
- memcpy(priv->gem_addr, priv->cached_addr, bo->total_size);
+ memcpy(priv->gem_addr, priv->cached_addr, bo->meta.total_size);
return 0;
}
diff --git a/chromium/third_party/minigbm/src/synaptics.c b/chromium/third_party/minigbm/src/synaptics.c
new file mode 100644
index 00000000000..bcd8189cfea
--- /dev/null
+++ b/chromium/third_party/minigbm/src/synaptics.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifdef DRV_SYNAPTICS
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888 };
+
+static int synaptics_init(struct driver *drv)
+{
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+
+ return drv_modify_linear_combinations(drv);
+}
+
+const struct backend backend_synaptics = {
+ .name = "synaptics",
+ .init = synaptics_init,
+ .bo_create = drv_dumb_bo_create,
+ .bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
+ .bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
+};
+
+#endif
diff --git a/chromium/third_party/minigbm/src/tegra.c b/chromium/third_party/minigbm/src/tegra.c
index 4b6b8d78dcf..df97461c379 100644
--- a/chromium/third_party/minigbm/src/tegra.c
+++ b/chromium/third_party/minigbm/src/tegra.c
@@ -119,12 +119,12 @@ static void transfer_tile(struct bo *bo, uint8_t *tiled, uint8_t *untiled, enum
if (tiled >= tiled_last)
return;
- if (x >= bo->width || y >= bo->height) {
+ if (x >= bo->meta.width || y >= bo->meta.height) {
tiled += bytes_per_pixel;
continue;
}
- tmp = untiled + y * bo->strides[0] + x * bytes_per_pixel;
+ tmp = untiled + y * bo->meta.strides[0] + x * bytes_per_pixel;
if (type == TEGRA_READ_TILED_BUFFER)
memcpy(tmp, tiled, bytes_per_pixel);
@@ -143,7 +143,7 @@ static void transfer_tiled_memory(struct bo *bo, uint8_t *tiled, uint8_t *untile
gob_top, gob_left;
uint32_t i, j, offset;
uint8_t *tmp, *tiled_last;
- uint32_t bytes_per_pixel = drv_stride_from_format(bo->format, 1, 0);
+ uint32_t bytes_per_pixel = drv_stride_from_format(bo->meta.format, 1, 0);
/*
* The blocklinear format consists of 8*(2^n) x 64 byte sized tiles,
@@ -152,16 +152,16 @@ static void transfer_tiled_memory(struct bo *bo, uint8_t *tiled, uint8_t *untile
gob_width = DIV_ROUND_UP(NV_BLOCKLINEAR_GOB_WIDTH, bytes_per_pixel);
gob_height = NV_BLOCKLINEAR_GOB_HEIGHT * (1 << NV_DEFAULT_BLOCK_HEIGHT_LOG2);
/* Calculate the height from maximum possible gob height */
- while (gob_height > NV_BLOCKLINEAR_GOB_HEIGHT && gob_height >= 2 * bo->height)
+ while (gob_height > NV_BLOCKLINEAR_GOB_HEIGHT && gob_height >= 2 * bo->meta.height)
gob_height /= 2;
gob_size_bytes = gob_height * NV_BLOCKLINEAR_GOB_WIDTH;
gob_size_pixels = gob_height * gob_width;
- gob_count_x = DIV_ROUND_UP(bo->strides[0], NV_BLOCKLINEAR_GOB_WIDTH);
- gob_count_y = DIV_ROUND_UP(bo->height, gob_height);
+ gob_count_x = DIV_ROUND_UP(bo->meta.strides[0], NV_BLOCKLINEAR_GOB_WIDTH);
+ gob_count_y = DIV_ROUND_UP(bo->meta.height, gob_height);
- tiled_last = tiled + bo->total_size;
+ tiled_last = tiled + bo->meta.total_size;
offset = 0;
for (j = 0; j < gob_count_y; j++) {
@@ -234,9 +234,9 @@ static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint3
}
bo->handles[0].u32 = gem_create.handle;
- bo->offsets[0] = 0;
- bo->total_size = bo->sizes[0] = size;
- bo->strides[0] = stride;
+ bo->meta.offsets[0] = 0;
+ bo->meta.total_size = bo->meta.sizes[0] = size;
+ bo->meta.strides[0] = stride;
if (kind != NV_MEM_KIND_PITCH) {
struct drm_tegra_gem_set_tiling gem_tile;
@@ -254,8 +254,8 @@ static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint3
}
/* Encode blocklinear parameters for EGLImage creation. */
- bo->tiling = (kind & 0xff) | ((block_height_log2 & 0xf) << 8);
- bo->format_modifiers[0] = fourcc_mod_code(NV, bo->tiling);
+ bo->meta.tiling = (kind & 0xff) | ((block_height_log2 & 0xf) << 8);
+ bo->meta.format_modifiers[0] = fourcc_mod_code(NV, bo->meta.tiling);
}
return 0;
@@ -283,16 +283,16 @@ static int tegra_bo_import(struct bo *bo, struct drv_import_fd_data *data)
/* NOTE(djmk): we only know about one tiled format, so if our drmIoctl call tells us we are
tiled, assume it is this format (NV_MEM_KIND_C32_2CRA) otherwise linear (KIND_PITCH). */
if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_PITCH) {
- bo->tiling = NV_MEM_KIND_PITCH;
+ bo->meta.tiling = NV_MEM_KIND_PITCH;
} else if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_BLOCK) {
- bo->tiling = NV_MEM_KIND_C32_2CRA;
+ bo->meta.tiling = NV_MEM_KIND_C32_2CRA;
} else {
drv_log("%s: unknown tile format %d\n", __func__, gem_get_tiling.mode);
drv_gem_bo_destroy(bo);
assert(0);
}
- bo->format_modifiers[0] = fourcc_mod_code(NV, bo->tiling);
+ bo->meta.format_modifiers[0] = fourcc_mod_code(NV, bo->meta.tiling);
return 0;
}
@@ -311,12 +311,12 @@ static void *tegra_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t
return MAP_FAILED;
}
- void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ void *addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
- vma->length = bo->total_size;
- if ((bo->tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) {
+ vma->length = bo->meta.total_size;
+ if ((bo->meta.tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) {
priv = calloc(1, sizeof(*priv));
- priv->untiled = calloc(1, bo->total_size);
+ priv->untiled = calloc(1, bo->meta.total_size);
priv->tiled = addr;
vma->priv = priv;
transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_READ_TILED_BUFFER);
diff --git a/chromium/third_party/minigbm/src/util.h b/chromium/third_party/minigbm/src/util.h
index e4e13991473..8f8bb0d340e 100644
--- a/chromium/third_party/minigbm/src/util.h
+++ b/chromium/third_party/minigbm/src/util.h
@@ -13,5 +13,7 @@
#define ALIGN(A, B) (((A) + (B)-1) & ~((B)-1))
#define IS_ALIGNED(A, B) (ALIGN((A), (B)) == (A))
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
+#define STRINGIZE_NO_EXPANSION(x) #x
+#define STRINGIZE(x) STRINGIZE_NO_EXPANSION(x)
#endif
diff --git a/chromium/third_party/minigbm/src/vc4.c b/chromium/third_party/minigbm/src/vc4.c
index 6edd967c9cf..06b3ed77d9b 100644
--- a/chromium/third_party/minigbm/src/vc4.c
+++ b/chromium/third_party/minigbm/src/vc4.c
@@ -28,14 +28,24 @@ static int vc4_init(struct driver *drv)
return drv_modify_linear_combinations(drv);
}
-static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+static int vc4_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height,
+ uint32_t format, uint64_t modifier)
{
int ret;
size_t plane;
uint32_t stride;
struct drm_vc4_create_bo bo_create;
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+ drv_log("DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED not supported yet\n");
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+
/*
* Since the ARM L1 cache line size is 64 bytes, align to that as a
* performance optimization.
@@ -45,20 +55,45 @@ static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_
drv_bo_from_format(bo, stride, height, format);
memset(&bo_create, 0, sizeof(bo_create));
- bo_create.size = bo->total_size;
+ bo_create.size = bo->meta.total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VC4_CREATE_BO, &bo_create);
if (ret) {
- drv_log("DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n", bo->total_size);
+ drv_log("DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n", bo->meta.total_size);
return -errno;
}
- for (plane = 0; plane < bo->num_planes; plane++)
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
bo->handles[plane].u32 = bo_create.handle;
return 0;
}
+static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ struct combination *combo;
+
+ combo = drv_get_combination(bo->drv, format, use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ return vc4_bo_create_for_modifier(bo, width, height, format, combo->metadata.modifier);
+}
+
+static int vc4_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
+ uint32_t format, const uint64_t *modifiers, uint32_t count)
+{
+ static const uint64_t modifier_order[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ };
+ uint64_t modifier;
+
+ modifier = drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order));
+
+ return vc4_bo_create_for_modifier(bo, width, height, format, modifier);
+}
+
static void *vc4_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
@@ -73,8 +108,8 @@ static void *vc4_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t m
return MAP_FAILED;
}
- vma->length = bo->total_size;
- return mmap(NULL, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ vma->length = bo->meta.total_size;
+ return mmap(NULL, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
bo_map.offset);
}
@@ -82,6 +117,7 @@ const struct backend backend_vc4 = {
.name = "vc4",
.init = vc4_init,
.bo_create = vc4_bo_create,
+ .bo_create_with_modifiers = vc4_bo_create_with_modifiers,
.bo_import = drv_prime_bo_import,
.bo_destroy = drv_gem_bo_destroy,
.bo_map = vc4_bo_map,
diff --git a/chromium/third_party/minigbm/src/virgl_hw.h b/chromium/third_party/minigbm/src/virgl_hw.h
index 94e1d5ea3e1..145780bf83e 100644
--- a/chromium/third_party/minigbm/src/virgl_hw.h
+++ b/chromium/third_party/minigbm/src/virgl_hw.h
@@ -288,6 +288,8 @@ enum virgl_formats {
#define VIRGL_BIND_PREFER_EMULATED_BGRA (1 << 21)
+#define VIRGL_BIND_LINEAR (1 << 22)
+
struct virgl_caps_bool_set1 {
unsigned indep_blend_enable:1;
unsigned indep_blend_func:1;
@@ -398,6 +400,7 @@ struct virgl_caps_v2 {
uint32_t max_combined_atomic_counter_buffers;
uint32_t host_feature_check_version;
struct virgl_supported_format_mask supported_readback_formats;
+ struct virgl_supported_format_mask scanout;
};
union virgl_caps {
diff --git a/chromium/third_party/minigbm/src/virtgpu_drm.h b/chromium/third_party/minigbm/src/virtgpu_drm.h
new file mode 100644
index 00000000000..a92d764d481
--- /dev/null
+++ b/chromium/third_party/minigbm/src/virtgpu_drm.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRTGPU_DRM_H
+#define VIRTGPU_DRM_H
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
+ * compatibility Keep fields aligned to their size
+ */
+
+#define DRM_VIRTGPU_MAP 0x01
+#define DRM_VIRTGPU_EXECBUFFER 0x02
+#define DRM_VIRTGPU_GETPARAM 0x03
+#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
+#define DRM_VIRTGPU_RESOURCE_INFO 0x05
+#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
+#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
+#define DRM_VIRTGPU_WAIT 0x08
+#define DRM_VIRTGPU_GET_CAPS 0x09
+
+#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
+#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_FLAGS (\
+ VIRTGPU_EXECBUF_FENCE_FD_IN |\
+ VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ 0)
+
+struct drm_virtgpu_map {
+ __u64 offset; /* use for mmap system call */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_virtgpu_execbuffer {
+ __u32 flags;
+ __u32 size;
+ __u64 command; /* void* */
+ __u64 bo_handles;
+ __u32 num_bo_handles;
+ __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+};
+
+#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+
+struct drm_virtgpu_getparam {
+ __u64 param;
+ __u64 value;
+};
+
+/* NO_BO flags? NO resource flag? */
+/* resource flag for y_0_top */
+struct drm_virtgpu_resource_create {
+ __u32 target;
+ __u32 format;
+ __u32 bind;
+ __u32 width;
+ __u32 height;
+ __u32 depth;
+ __u32 array_size;
+ __u32 last_level;
+ __u32 nr_samples;
+ __u32 flags;
+ __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
+ __u32 res_handle; /* returned by kernel */
+ __u32 size; /* validate transfer in the host */
+ __u32 stride; /* validate transfer in the host */
+};
+
+struct drm_virtgpu_resource_info {
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u32 size;
+ union {
+ __u32 stride;
+ __u32 strides[4]; /* strides[0] is accessible with stride. */
+ };
+ __u32 num_planes;
+ __u32 offsets[4];
+ __u64 format_modifier;
+};
+
+struct drm_virtgpu_3d_box {
+ __u32 x;
+ __u32 y;
+ __u32 z;
+ __u32 w;
+ __u32 h;
+ __u32 d;
+};
+
+struct drm_virtgpu_3d_transfer_to_host {
+ __u32 bo_handle;
+ struct drm_virtgpu_3d_box box;
+ __u32 level;
+ __u32 offset;
+};
+
+struct drm_virtgpu_3d_transfer_from_host {
+ __u32 bo_handle;
+ struct drm_virtgpu_3d_box box;
+ __u32 level;
+ __u32 offset;
+};
+
+#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
+struct drm_virtgpu_3d_wait {
+ __u32 handle; /* 0 is an invalid handle */
+ __u32 flags;
+};
+
+struct drm_virtgpu_get_caps {
+ __u32 cap_set_id;
+ __u32 cap_set_ver;
+ __u64 addr;
+ __u32 size;
+ __u32 pad;
+};
+
+#define DRM_IOCTL_VIRTGPU_MAP \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
+
+#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+ struct drm_virtgpu_execbuffer)
+
+#define DRM_IOCTL_VIRTGPU_GETPARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
+ struct drm_virtgpu_getparam)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
+ struct drm_virtgpu_resource_create)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
+ struct drm_virtgpu_resource_info)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
+ struct drm_virtgpu_3d_transfer_from_host)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
+ struct drm_virtgpu_3d_transfer_to_host)
+
+#define DRM_IOCTL_VIRTGPU_WAIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
+ struct drm_virtgpu_3d_wait)
+
+#define DRM_IOCTL_VIRTGPU_GET_CAPS \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
+ struct drm_virtgpu_get_caps)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/chromium/third_party/minigbm/src/virtio_gpu.c b/chromium/third_party/minigbm/src/virtio_gpu.c
index 163207e2c21..eb6c97d6f8e 100644
--- a/chromium/third_party/minigbm/src/virtio_gpu.c
+++ b/chromium/third_party/minigbm/src/virtio_gpu.c
@@ -9,13 +9,13 @@
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
-#include <virtgpu_drm.h>
#include <xf86drm.h>
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#include "virgl_hw.h"
+#include "virtgpu_drm.h"
#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
@@ -30,6 +30,7 @@ static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMA
DRM_FORMAT_XRGB8888 };
static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
+ DRM_FORMAT_NV12,
DRM_FORMAT_YVU420_ANDROID };
static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, DRM_FORMAT_RG88,
@@ -37,9 +38,11 @@ static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R
struct virtio_gpu_priv {
int has_3d;
+ int caps_is_v2;
+ union virgl_caps caps;
};
-static uint32_t translate_format(uint32_t drm_fourcc, uint32_t plane)
+static uint32_t translate_format(uint32_t drm_fourcc)
{
switch (drm_fourcc) {
case DRM_FORMAT_XRGB8888:
@@ -66,15 +69,70 @@ static uint32_t translate_format(uint32_t drm_fourcc, uint32_t plane)
}
}
+static bool virtio_gpu_supports_format(struct virgl_supported_format_mask *supported,
+ uint32_t drm_format)
+{
+ uint32_t virgl_format = translate_format(drm_format);
+ if (!virgl_format) {
+ return false;
+ }
+
+ uint32_t bitmask_index = virgl_format / 32;
+ uint32_t bit_index = virgl_format % 32;
+ return supported->bitmask[bitmask_index] & (1 << bit_index);
+}
+
+// Adds the given buffer combination to the list of supported buffer combinations if the
+// combination is supported by the virtio backend.
+static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
+ struct format_metadata *metadata, uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ if (priv->has_3d && priv->caps.max_version >= 1) {
+ if ((use_flags & BO_USE_RENDERING) &&
+ !virtio_gpu_supports_format(&priv->caps.v1.render, drm_format)) {
+ drv_log("Skipping unsupported render format: %d\n", drm_format);
+ return;
+ }
+
+ if ((use_flags & BO_USE_TEXTURE) &&
+ !virtio_gpu_supports_format(&priv->caps.v1.sampler, drm_format)) {
+ drv_log("Skipping unsupported texture format: %d\n", drm_format);
+ return;
+ }
+ if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
+ !virtio_gpu_supports_format(&priv->caps.v2.scanout, drm_format)) {
+ drv_log("Unsupported scanout format: %d\n", drm_format);
+ use_flags &= ~BO_USE_SCANOUT;
+ }
+ }
+
+ drv_add_combination(drv, drm_format, metadata, use_flags);
+}
+
+// Adds each given buffer combination to the list of supported buffer combinations if the
+// combination supported by the virtio backend.
+static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
+ uint32_t num_formats, struct format_metadata *metadata,
+ uint64_t use_flags)
+{
+ uint32_t i;
+
+ for (i = 0; i < num_formats; i++) {
+ virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
+ }
+}
+
static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags)
{
- if (bo->format != DRM_FORMAT_R8) {
+ if (bo->meta.format != DRM_FORMAT_R8) {
width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
}
- return drv_dumb_bo_create(bo, width, height, format, use_flags);
+ return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}
static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
@@ -88,15 +146,29 @@ static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bi
static uint32_t use_flags_to_bind(uint64_t use_flags)
{
- uint32_t bind = 0;
+ /* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
+ uint32_t bind = VIRGL_BIND_SHARED;
handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
- // TODO (b/12983436): handle other use flags.
+ handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
+ handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);
+
+ handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
+ handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
+ handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
+ handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);
+
+ // All host drivers only support linear camera buffer formats. If
+ // that changes, this will need to be modified.
+ handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
+ handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);
+
if (use_flags) {
drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
}
+
return bind;
}
@@ -104,66 +176,44 @@ static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height
uint64_t use_flags)
{
int ret;
- ssize_t plane;
- ssize_t num_planes = drv_num_planes_from_format(format);
- uint32_t stride0;
- uint32_t bind = use_flags_to_bind(use_flags);
-
- for (plane = 0; plane < num_planes; plane++) {
- uint32_t stride = drv_stride_from_format(format, width, plane);
- uint32_t size = drv_size_from_format(format, stride, height, plane);
- uint32_t res_format = translate_format(format, plane);
- struct drm_virtgpu_resource_create res_create;
-
- memset(&res_create, 0, sizeof(res_create));
- size = ALIGN(size, PAGE_SIZE);
- /*
- * Setting the target is intended to ensure this resource gets bound as a 2D
- * texture in the host renderer's GL state. All of these resource properties are
- * sent unchanged by the kernel to the host, which in turn sends them unchanged to
- * virglrenderer. When virglrenderer makes a resource, it will convert the target
- * enum to the equivalent one in GL and then bind the resource to that target.
- */
- res_create.target = PIPE_TEXTURE_2D;
- res_create.format = res_format;
- res_create.bind = bind;
- res_create.width = width;
- res_create.height = height;
- res_create.depth = 1;
- res_create.array_size = 1;
- res_create.last_level = 0;
- res_create.nr_samples = 0;
- res_create.stride = stride;
- res_create.size = size;
-
- ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
- if (ret) {
- drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n",
- strerror(errno));
- ret = -errno;
- goto fail;
- }
-
- bo->handles[plane].u32 = res_create.bo_handle;
+ uint32_t stride;
+ struct drm_virtgpu_resource_create res_create;
+
+ stride = drv_stride_from_format(format, width, 0);
+ drv_bo_from_format(bo, stride, height, format);
+
+ /*
+ * Setting the target is intended to ensure this resource gets bound as a 2D
+ * texture in the host renderer's GL state. All of these resource properties are
+ * sent unchanged by the kernel to the host, which in turn sends them unchanged to
+ * virglrenderer. When virglrenderer makes a resource, it will convert the target
+ * enum to the equivalent one in GL and then bind the resource to that target.
+ */
+ memset(&res_create, 0, sizeof(res_create));
+
+ res_create.target = PIPE_TEXTURE_2D;
+ res_create.format = translate_format(format);
+ res_create.bind = use_flags_to_bind(use_flags);
+ res_create.width = width;
+ res_create.height = height;
+
+ /* For virgl 3D */
+ res_create.depth = 1;
+ res_create.array_size = 1;
+ res_create.last_level = 0;
+ res_create.nr_samples = 0;
+
+ res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE); // PAGE_SIZE = 0x1000
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
+ return ret;
}
- stride0 = drv_stride_from_format(format, width, 0);
- drv_bo_from_format(bo, stride0, height, format);
-
- for (plane = 0; plane < num_planes; plane++)
- bo->offsets[plane] = 0;
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
+ bo->handles[plane].u32 = res_create.bo_handle;
return 0;
-
-fail:
- for (plane--; plane >= 0; plane--) {
- struct drm_gem_close gem_close;
- memset(&gem_close, 0, sizeof(gem_close));
- gem_close.handle = bo->handles[plane].u32;
- drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
- }
-
- return ret;
}
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
@@ -180,11 +230,56 @@ static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, u
return MAP_FAILED;
}
- vma->length = bo->total_size;
- return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ vma->length = bo->meta.total_size;
+ return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
}
+static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
+{
+ int ret;
+ struct drm_virtgpu_get_caps cap_args;
+ struct drm_virtgpu_getparam param_args;
+ uint32_t can_query_v2 = 0;
+
+ memset(&param_args, 0, sizeof(param_args));
+ param_args.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
+ param_args.value = (uint64_t)(uintptr_t)&can_query_v2;
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &param_args);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
+ }
+
+ *caps_is_v2 = 0;
+ memset(&cap_args, 0, sizeof(cap_args));
+ cap_args.addr = (unsigned long long)caps;
+ if (can_query_v2) {
+ *caps_is_v2 = 1;
+ cap_args.cap_set_id = 2;
+ cap_args.size = sizeof(union virgl_caps);
+ } else {
+ cap_args.cap_set_id = 1;
+ cap_args.size = sizeof(struct virgl_caps_v1);
+ }
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
+ *caps_is_v2 = 0;
+
+ // Fallback to v1
+ cap_args.cap_set_id = 1;
+ cap_args.size = sizeof(struct virgl_caps_v1);
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
+ }
+ }
+
+ return ret;
+}
+
static int virtio_gpu_init(struct driver *drv)
{
int ret;
@@ -204,28 +299,43 @@ static int virtio_gpu_init(struct driver *drv)
priv->has_3d = 0;
}
- /* This doesn't mean host can scanout everything, it just means host
- * hypervisor can show it. */
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
-
if (priv->has_3d) {
- drv_add_combinations(drv, texture_source_formats,
- ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
- BO_USE_TEXTURE_MASK);
+ virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
+
+ /* This doesn't mean host can scanout everything, it just means host
+ * hypervisor can show it. */
+ virtio_gpu_add_combinations(drv, render_target_formats,
+ ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
+ BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+ virtio_gpu_add_combinations(drv, texture_source_formats,
+ ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
+ BO_USE_TEXTURE_MASK);
} else {
- drv_add_combinations(drv, dumb_texture_source_formats,
- ARRAY_SIZE(dumb_texture_source_formats), &LINEAR_METADATA,
- BO_USE_TEXTURE_MASK);
- drv_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
- BO_USE_SW_MASK | BO_USE_LINEAR);
+ /* Virtio primary plane only allows this format. */
+ virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
+ BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+ /* Virtio cursor plane only allows this format and Chrome cannot live without
+ * ARGB888 renderable format. */
+ virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
+ BO_USE_RENDER_MASK | BO_USE_CURSOR);
+ /* Android needs more, but they cannot be bound as scanouts anymore after
+ * "drm/virtio: fix DRM_FORMAT_* handling" */
+ virtio_gpu_add_combinations(drv, render_target_formats,
+ ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
+ BO_USE_RENDER_MASK);
+ virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
+ ARRAY_SIZE(dumb_texture_source_formats),
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+ virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
+ BO_USE_SW_MASK | BO_USE_LINEAR);
}
/* Android CTS tests require this. */
- drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
+ virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
@@ -271,10 +381,16 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
int ret;
struct drm_virtgpu_3d_transfer_from_host xfer;
struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ struct drm_virtgpu_3d_wait waitcmd;
if (!priv->has_3d)
return 0;
+ // Invalidate is only necessary if the host writes to the buffer.
+ if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
+ BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
+ return 0;
+
memset(&xfer, 0, sizeof(xfer));
xfer.bo_handle = mapping->vma->handle;
xfer.box.x = mapping->rect.x;
@@ -283,12 +399,34 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
xfer.box.h = mapping->rect.height;
xfer.box.d = 1;
+ if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
+ // Unfortunately, the kernel doesn't actually pass the guest layer_stride and
+ // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). For gbm
+ // based resources, we can work around this by using the level field to pass
+ // the stride to virglrenderer's gbm transfer code. However, we need to avoid
+ // doing this for resources which don't rely on that transfer code, which is
+ // resources with the BO_USE_RENDERING flag set.
+ // TODO(b/145993887): Send also stride when the patches are landed
+ xfer.level = bo->meta.strides[0];
+ }
+
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
if (ret) {
drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
return -errno;
}
+ // The transfer needs to complete before invalidate returns so that any host changes
+ // are visible and to ensure the host doesn't overwrite subsequent guest changes.
+ // TODO(b/136733358): Support returning fences from transfers
+ memset(&waitcmd, 0, sizeof(waitcmd));
+ waitcmd.handle = mapping->vma->handle;
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
+ return -errno;
+ }
+
return 0;
}
@@ -297,6 +435,7 @@ static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
int ret;
struct drm_virtgpu_3d_transfer_to_host xfer;
struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ struct drm_virtgpu_3d_wait waitcmd;
if (!priv->has_3d)
return 0;
@@ -312,12 +451,32 @@ static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
xfer.box.h = mapping->rect.height;
xfer.box.d = 1;
+ // Unfortunately, the kernel doesn't actually pass the guest layer_stride and
+ // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
+ // the level to work around this.
+ xfer.level = bo->meta.strides[0];
+
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
if (ret) {
drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
return -errno;
}
+ // If the buffer is only accessed by the host GPU, then the flush is ordered
+ // with subsequent commands. However, if other host hardware can access the
+ // buffer, we need to wait for the transfer to complete for consistency.
+ // TODO(b/136733358): Support returning fences from transfers
+ if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
+ memset(&waitcmd, 0, sizeof(waitcmd));
+ waitcmd.handle = mapping->vma->handle;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
+ return -errno;
+ }
+ }
+
return 0;
}
@@ -346,6 +505,38 @@ static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, u
}
}
+static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES])
+{
+ int ret;
+ struct drm_virtgpu_resource_info res_info;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+
+ if (!priv->has_3d)
+ return 0;
+
+ memset(&res_info, 0, sizeof(res_info));
+ res_info.bo_handle = bo->handles[0].u32;
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
+ return ret;
+ }
+
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
+ /*
+ * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
+ * ioctl.
+ */
+ if (res_info.strides[plane]) {
+ strides[plane] = res_info.strides[plane];
+ offsets[plane] = res_info.offsets[plane];
+ }
+ }
+
+ return 0;
+}
+
const struct backend backend_virtio_gpu = {
.name = "virtio_gpu",
.init = virtio_gpu_init,
@@ -358,4 +549,5 @@ const struct backend backend_virtio_gpu = {
.bo_invalidate = virtio_gpu_bo_invalidate,
.bo_flush = virtio_gpu_bo_flush,
.resolve_format = virtio_gpu_resolve_format,
+ .resource_info = virtio_gpu_resource_info,
};