authorAllan Sandfeld Jensen <allan.jensen@qt.io>2017-04-05 14:08:31 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2017-04-11 07:46:53 +0000
commit6a4cabb866f66d4128a97cdc6d9d08ce074f1247 (patch)
treeab00f70a5e89278d6a0d16ff0c42578dc4d84a2d /chromium/third_party/libdrm
parente733310db58160074f574c429d48f8308c0afe17 (diff)
downloadqtwebengine-chromium-6a4cabb866f66d4128a97cdc6d9d08ce074f1247.tar.gz
BASELINE: Update Chromium to 57.0.2987.144
Change-Id: I29db402ff696c71a04c4dbaec822c2e53efe0267
Reviewed-by: Peter Varga <pvarga@inf.u-szeged.hu>
Diffstat (limited to 'chromium/third_party/libdrm')
-rw-r--r--  chromium/third_party/libdrm/BUILD.gn  54
-rw-r--r--  chromium/third_party/libdrm/OWNERS  3
-rw-r--r--  chromium/third_party/libdrm/README.chromium  13
-rw-r--r--  chromium/third_party/libdrm/src/Android.mk  66
-rw-r--r--  chromium/third_party/libdrm/src/CleanSpec.mk  4
-rw-r--r--  chromium/third_party/libdrm/src/Makefile.am  146
-rw-r--r--  chromium/third_party/libdrm/src/Makefile.sources  42
-rw-r--r--  chromium/third_party/libdrm/src/PRESUBMIT.cfg  3
-rw-r--r--  chromium/third_party/libdrm/src/README  41
-rw-r--r--  chromium/third_party/libdrm/src/RELEASING  65
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/Android.mk  18
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/Makefile.am  47
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/Makefile.sources  14
-rwxr-xr-x  chromium/third_party/libdrm/src/amdgpu/amdgpu-symbol-check  55
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/amdgpu.h  1248
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/amdgpu_bo.c  705
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/amdgpu_cs.c  544
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/amdgpu_device.c  305
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/amdgpu_gpu_info.c  310
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/amdgpu_internal.h  204
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/amdgpu_vamgr.c  287
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/libdrm_amdgpu.pc.in  11
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/util_hash.c  387
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/util_hash.h  107
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/util_hash_table.c  262
-rw-r--r--  chromium/third_party/libdrm/src/amdgpu/util_hash_table.h  73
-rwxr-xr-x  chromium/third_party/libdrm/src/autogen.sh  14
-rw-r--r--  chromium/third_party/libdrm/src/configure.ac  584
-rw-r--r--  chromium/third_party/libdrm/src/exynos/Makefile.am  27
-rwxr-xr-x  chromium/third_party/libdrm/src/exynos/exynos-symbol-check  40
-rw-r--r--  chromium/third_party/libdrm/src/exynos/exynos_drm.c  452
-rw-r--r--  chromium/third_party/libdrm/src/exynos/exynos_drm.h  172
-rw-r--r--  chromium/third_party/libdrm/src/exynos/exynos_drmif.h  112
-rw-r--r--  chromium/third_party/libdrm/src/exynos/exynos_fimg2d.c  1015
-rw-r--r--  chromium/third_party/libdrm/src/exynos/exynos_fimg2d.h  320
-rw-r--r--  chromium/third_party/libdrm/src/exynos/fimg2d_reg.h  114
-rw-r--r--  chromium/third_party/libdrm/src/exynos/libdrm_exynos.pc.in  11
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/Android.mk  18
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/Makefile.am  30
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/Makefile.sources  26
-rwxr-xr-x  chromium/third_party/libdrm/src/freedreno/freedreno-symbol-check  58
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/freedreno_bo.c  323
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/freedreno_bo_cache.c  222
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/freedreno_device.c  147
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/freedreno_drmif.h  132
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/freedreno_pipe.c  82
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/freedreno_priv.h  197
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/freedreno_ringbuffer.c  181
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/freedreno_ringbuffer.h  92
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/kgsl/README  26
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_bo.c  315
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_device.c  65
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_drm.h  192
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_pipe.c  280
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_priv.h  120
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_ringbuffer.c  231
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/kgsl/msm_kgsl.h  519
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/libdrm_freedreno.pc.in  11
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/msm/msm_bo.c  161
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/msm/msm_device.c  68
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/msm/msm_drm.h  259
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/msm/msm_pipe.c  165
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/msm/msm_priv.h  103
-rw-r--r--  chromium/third_party/libdrm/src/freedreno/msm/msm_ringbuffer.c  581
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/amdgpu_drm.h  645
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/drm.h  882
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/drm_fourcc.h  251
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/drm_mode.h  651
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/drm_sarea.h  84
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/evdi_drm.h  85
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/i915_drm.h  1173
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/mach64_drm.h  256
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/mga_drm.h  419
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/nouveau_drm.h  210
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/qxl_drm.h  152
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/r128_drm.h  326
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/radeon_drm.h  1046
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/savage_drm.h  210
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/sis_drm.h  67
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/tegra_drm.h  201
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/vc4_drm.h  300
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/vgem_drm.h  33
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/via_drm.h  275
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/virtgpu_drm.h  174
-rw-r--r--  chromium/third_party/libdrm/src/include/drm/vmwgfx_drm.h  1090
-rw-r--r--  chromium/third_party/libdrm/src/intel/Android.mk  43
-rw-r--r--  chromium/third_party/libdrm/src/intel/Makefile.am  73
-rw-r--r--  chromium/third_party/libdrm/src/intel/Makefile.sources  14
-rwxr-xr-x  chromium/third_party/libdrm/src/intel/intel-symbol-check  90
-rw-r--r--  chromium/third_party/libdrm/src/intel/intel_aub.h  153
-rw-r--r--  chromium/third_party/libdrm/src/intel/intel_bufmgr.c  374
-rw-r--r--  chromium/third_party/libdrm/src/intel/intel_bufmgr.h  321
-rw-r--r--  chromium/third_party/libdrm/src/intel/intel_bufmgr_fake.c  1630
-rw-r--r--  chromium/third_party/libdrm/src/intel/intel_bufmgr_gem.c  3522
-rw-r--r--  chromium/third_party/libdrm/src/intel/intel_bufmgr_priv.h  325
-rw-r--r--  chromium/third_party/libdrm/src/intel/intel_chipset.h  462
-rw-r--r--  chromium/third_party/libdrm/src/intel/intel_debug.h  44
-rw-r--r--  chromium/third_party/libdrm/src/intel/intel_decode.c  3983
-rw-r--r--  chromium/third_party/libdrm/src/intel/libdrm_intel.pc.in  11
-rw-r--r--  chromium/third_party/libdrm/src/intel/mm.c  264
-rw-r--r--  chromium/third_party/libdrm/src/intel/mm.h  84
-rw-r--r--  chromium/third_party/libdrm/src/intel/test_decode.c  198
-rw-r--r--  chromium/third_party/libdrm/src/libdrm.pc.in  10
-rw-r--r--  chromium/third_party/libdrm/src/libdrm_lists.h  118
-rw-r--r--  chromium/third_party/libdrm/src/libdrm_macros.h  87
-rw-r--r--  chromium/third_party/libdrm/src/libkms/Android.mk  57
-rw-r--r--  chromium/third_party/libdrm/src/libkms/Makefile.am  52
-rw-r--r--  chromium/third_party/libdrm/src/libkms/Makefile.sources  26
-rw-r--r--  chromium/third_party/libdrm/src/libkms/api.c  143
-rw-r--r--  chromium/third_party/libdrm/src/libkms/dumb.c  220
-rw-r--r--  chromium/third_party/libdrm/src/libkms/exynos.c  209
-rw-r--r--  chromium/third_party/libdrm/src/libkms/intel.c  240
-rw-r--r--  chromium/third_party/libdrm/src/libkms/internal.h  86
-rwxr-xr-x  chromium/third_party/libdrm/src/libkms/kms-symbol-check  25
-rw-r--r--  chromium/third_party/libdrm/src/libkms/libkms.h  82
-rw-r--r--  chromium/third_party/libdrm/src/libkms/libkms.pc.in  11
-rw-r--r--  chromium/third_party/libdrm/src/libkms/linux.c  250
-rw-r--r--  chromium/third_party/libdrm/src/libkms/mediatek.c  222
-rw-r--r--  chromium/third_party/libdrm/src/libkms/nouveau.c  222
-rw-r--r--  chromium/third_party/libdrm/src/libkms/radeon.c  243
-rw-r--r--  chromium/third_party/libdrm/src/libkms/vmwgfx.c  208
-rw-r--r--  chromium/third_party/libdrm/src/man/Makefile.am  62
-rw-r--r--  chromium/third_party/libdrm/src/man/drm-kms.xml  342
-rw-r--r--  chromium/third_party/libdrm/src/man/drm-memory.xml  430
-rw-r--r--  chromium/third_party/libdrm/src/man/drm.xml  137
-rw-r--r--  chromium/third_party/libdrm/src/man/drmAvailable.xml  75
-rw-r--r--  chromium/third_party/libdrm/src/man/drmHandleEvent.xml  102
-rw-r--r--  chromium/third_party/libdrm/src/man/drmModeGetResources.xml  139
-rw-r--r--  chromium/third_party/libdrm/src/mediatek/Android.mk  31
-rw-r--r--  chromium/third_party/libdrm/src/mediatek/Makefile.am  20
-rw-r--r--  chromium/third_party/libdrm/src/mediatek/libdrm_mediatek.pc.in  11
-rw-r--r--  chromium/third_party/libdrm/src/mediatek/mediatek_drm.c  299
-rw-r--r--  chromium/third_party/libdrm/src/mediatek/mediatek_drm.h  59
-rw-r--r--  chromium/third_party/libdrm/src/mediatek/mediatek_drmif.h  79
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/Android.mk  18
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/Makefile.am  33
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/Makefile.sources  9
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/abi16.c  363
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/bufctx.c  164
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/libdrm_nouveau.pc.in  11
-rwxr-xr-x  chromium/third_party/libdrm/src/nouveau/nouveau-symbol-check  58
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/nouveau.c  878
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/nouveau.h  276
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/nvif/cl0080.h  45
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/nvif/cl9097.h  44
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/nvif/class.h  141
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/nvif/if0002.h  38
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/nvif/if0003.h  33
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/nvif/ioctl.h  132
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/nvif/unpack.h  28
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/private.h  124
-rw-r--r--  chromium/third_party/libdrm/src/nouveau/pushbuf.c  781
-rw-r--r--  chromium/third_party/libdrm/src/omap/Makefile.am  24
-rw-r--r--  chromium/third_party/libdrm/src/omap/libdrm_omap.pc.in  11
-rwxr-xr-x  chromium/third_party/libdrm/src/omap/omap-symbol-check  35
-rw-r--r--  chromium/third_party/libdrm/src/omap/omap_drm.c  478
-rw-r--r--  chromium/third_party/libdrm/src/omap/omap_drm.h  135
-rw-r--r--  chromium/third_party/libdrm/src/omap/omap_drmif.h  65
-rw-r--r--  chromium/third_party/libdrm/src/radeon/Android.mk  18
-rw-r--r--  chromium/third_party/libdrm/src/radeon/Makefile.am  47
-rw-r--r--  chromium/third_party/libdrm/src/radeon/Makefile.sources  21
-rw-r--r--  chromium/third_party/libdrm/src/radeon/bof.c  477
-rw-r--r--  chromium/third_party/libdrm/src/radeon/bof.h  90
-rw-r--r--  chromium/third_party/libdrm/src/radeon/libdrm_radeon.pc.in  11
-rw-r--r--  chromium/third_party/libdrm/src/radeon/r600_pci_ids.h  487
-rwxr-xr-x  chromium/third_party/libdrm/src/radeon/radeon-symbol-check  61
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_bo.c  145
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_bo.h  74
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_bo_gem.c  407
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_bo_gem.h  48
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_bo_int.h  45
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_cs.c  98
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_cs.h  141
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_cs_gem.c  559
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_cs_gem.h  41
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_cs_int.h  67
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_cs_space.c  251
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_surface.c  2560
-rw-r--r--  chromium/third_party/libdrm/src/radeon/radeon_surface.h  149
-rw-r--r--  chromium/third_party/libdrm/src/rockchip/Android.mk  27
-rw-r--r--  chromium/third_party/libdrm/src/rockchip/Makefile.am  20
-rw-r--r--  chromium/third_party/libdrm/src/rockchip/libdrm_rockchip.pc.in  11
-rw-r--r--  chromium/third_party/libdrm/src/rockchip/rockchip_drm.c  299
-rw-r--r--  chromium/third_party/libdrm/src/rockchip/rockchip_drm.h  73
-rw-r--r--  chromium/third_party/libdrm/src/rockchip/rockchip_drmif.h  77
-rw-r--r--  chromium/third_party/libdrm/src/tegra/Makefile.am  25
-rw-r--r--  chromium/third_party/libdrm/src/tegra/libdrm_tegra.pc.in  11
-rw-r--r--  chromium/third_party/libdrm/src/tegra/private.h  51
-rwxr-xr-x  chromium/third_party/libdrm/src/tegra/tegra-symbol-check  30
-rw-r--r--  chromium/third_party/libdrm/src/tegra/tegra.c  332
-rw-r--r--  chromium/third_party/libdrm/src/tegra/tegra.h  60
-rw-r--r--  chromium/third_party/libdrm/src/util_double_list.h  143
-rw-r--r--  chromium/third_party/libdrm/src/util_math.h  34
-rw-r--r--  chromium/third_party/libdrm/src/vc4/Makefile.am  34
-rw-r--r--  chromium/third_party/libdrm/src/vc4/Makefile.sources  3
-rw-r--r--  chromium/third_party/libdrm/src/vc4/libdrm_vc4.pc.in  9
-rw-r--r--  chromium/third_party/libdrm/src/vc4/vc4_packet.h  397
-rw-r--r--  chromium/third_party/libdrm/src/vc4/vc4_qpu_defines.h  274
-rw-r--r--  chromium/third_party/libdrm/src/xf86atomic.h  117
-rw-r--r--  chromium/third_party/libdrm/src/xf86drm.c  3307
-rw-r--r--  chromium/third_party/libdrm/src/xf86drm.h  803
-rw-r--r--  chromium/third_party/libdrm/src/xf86drmHash.c  253
-rw-r--r--  chromium/third_party/libdrm/src/xf86drmHash.h  47
-rw-r--r--  chromium/third_party/libdrm/src/xf86drmMode.c  1502
-rw-r--r--  chromium/third_party/libdrm/src/xf86drmMode.h  526
-rw-r--r--  chromium/third_party/libdrm/src/xf86drmRandom.c  137
-rw-r--r--  chromium/third_party/libdrm/src/xf86drmRandom.h  35
-rw-r--r--  chromium/third_party/libdrm/src/xf86drmSL.c  318
208 files changed, 55573 insertions, 0 deletions
diff --git a/chromium/third_party/libdrm/BUILD.gn b/chromium/third_party/libdrm/BUILD.gn
new file mode 100644
index 00000000000..7f040070ed3
--- /dev/null
+++ b/chromium/third_party/libdrm/BUILD.gn
@@ -0,0 +1,54 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import("//build/config/linux/pkg_config.gni")
+
+assert(is_linux)
+
+declare_args() {
+ # Controls whether the build should use the version of libdrm
+ # library shipped with the system. In release builds of Chrome OS we
+ # use the system version, but when building on dev workstations we
+ # bundle it because Ubuntu doesn't ship a usable version.
+ use_system_libdrm = false
+}
+
+if (!use_system_libdrm) {
+ config("libdrm_config") {
+ include_dirs = [
+ "src",
+ "src/include",
+ "src/include/drm",
+ ]
+ if (is_clang) {
+ cflags = [ "-Wno-enum-conversion" ]
+ }
+ }
+
+ static_library("libdrm") {
+ sources = [
+ "src/xf86drm.c",
+ "src/xf86drmHash.c",
+ "src/xf86drmMode.c",
+ "src/xf86drmRandom.c",
+ ]
+
+ include_dirs = [
+ "src",
+ "src/include",
+ ]
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+
+ public_configs = [ ":libdrm_config" ]
+ }
+}
+
+if (use_system_libdrm) {
+ pkg_config("libdrm_config") {
+ packages = [ "libdrm" ]
+ }
+ group("libdrm") {
+ public_configs = [ ":libdrm_config" ]
+ }
+}
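
(Usage note, not part of the change itself: use_system_libdrm declared above is a GN build argument, so it can be toggled at generation time, e.g. gn gen out/Default --args="use_system_libdrm=true"; the output directory name here is illustrative.)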
diff --git a/chromium/third_party/libdrm/OWNERS b/chromium/third_party/libdrm/OWNERS
new file mode 100644
index 00000000000..2150aae76d9
--- /dev/null
+++ b/chromium/third_party/libdrm/OWNERS
@@ -0,0 +1,3 @@
+dcastagna@chromium.org
+hoegsberg@chromium.org
+reveman@chromium.org \ No newline at end of file
diff --git a/chromium/third_party/libdrm/README.chromium b/chromium/third_party/libdrm/README.chromium
new file mode 100644
index 00000000000..678d4beebee
--- /dev/null
+++ b/chromium/third_party/libdrm/README.chromium
@@ -0,0 +1,13 @@
+Name: libdrm
+Short Name: libdrm
+URL: https://chromium.googlesource.com/chromiumos/third_party/libdrm
+Version: 2.4.70
+License: MIT, GPL
+License File: NOT_SHIPPED
+Security Critical: yes
+
+Description:
+Userspace interface to kernel DRM services.
+
+Local Modifications:
+None
diff --git a/chromium/third_party/libdrm/src/Android.mk b/chromium/third_party/libdrm/src/Android.mk
new file mode 100644
index 00000000000..2aa2bc964df
--- /dev/null
+++ b/chromium/third_party/libdrm/src/Android.mk
@@ -0,0 +1,66 @@
+#
+# Copyright © 2011-2012 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Import variables LIBDRM_{,H_,INCLUDE_H_,INCLUDE_VMWGFX_H_}FILES
+include $(LOCAL_PATH)/Makefile.sources
+
+# Static library for the device (recovery)
+include $(CLEAR_VARS)
+LOCAL_MODULE := libdrm
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SRC_FILES := $(patsubst %.h, , $(LIBDRM_FILES))
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ $(LOCAL_PATH) \
+ $(LOCAL_PATH)/include/drm
+
+LOCAL_C_INCLUDES := \
+ $(LOCAL_PATH)/include/drm
+
+LOCAL_CFLAGS := \
+ -DHAVE_VISIBILITY=1 \
+ -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
+include $(BUILD_STATIC_LIBRARY)
+
+# Shared library for the device
+include $(CLEAR_VARS)
+LOCAL_MODULE := libdrm
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SRC_FILES := $(patsubst %.h, , $(LIBDRM_FILES))
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ $(LOCAL_PATH) \
+ $(LOCAL_PATH)/include/drm
+
+LOCAL_C_INCLUDES := \
+ $(LOCAL_PATH)/include/drm
+
+LOCAL_CFLAGS := \
+ -DHAVE_VISIBILITY=1 \
+ -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/chromium/third_party/libdrm/src/CleanSpec.mk b/chromium/third_party/libdrm/src/CleanSpec.mk
new file mode 100644
index 00000000000..28a11db4e7b
--- /dev/null
+++ b/chromium/third_party/libdrm/src/CleanSpec.mk
@@ -0,0 +1,4 @@
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/include/libdrm)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/include/freedreno)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libdrm_*intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libdrm_*intermediates)
diff --git a/chromium/third_party/libdrm/src/Makefile.am b/chromium/third_party/libdrm/src/Makefile.am
new file mode 100644
index 00000000000..fcb21b65bdd
--- /dev/null
+++ b/chromium/third_party/libdrm/src/Makefile.am
@@ -0,0 +1,146 @@
+# Copyright 2005 Adam Jackson.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# on the rights to use, copy, modify, merge, publish, distribute, sub
+# license, and/or sell copies of the Software, and to permit persons to whom
+# the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+# ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+include Makefile.sources
+
+ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS}
+
+AM_DISTCHECK_CONFIGURE_FLAGS = \
+ --enable-udev \
+ --enable-libkms \
+ --enable-intel \
+ --enable-radeon \
+ --enable-amdgpu \
+ --enable-nouveau \
+ --enable-vc4 \
+ --enable-vmwgfx \
+ --enable-omap-experimental-api \
+ --enable-exynos-experimental-api \
+ --enable-freedreno \
+ --enable-freedreno-kgsl\
+ --enable-tegra-experimental-api \
+ --enable-install-test-programs \
+ --enable-cairo-tests \
+ --enable-manpages \
+ --enable-valgrind
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm.pc
+
+if HAVE_LIBKMS
+LIBKMS_SUBDIR = libkms
+endif
+
+if HAVE_INTEL
+INTEL_SUBDIR = intel
+endif
+
+if HAVE_NOUVEAU
+NOUVEAU_SUBDIR = nouveau
+endif
+
+if HAVE_RADEON
+RADEON_SUBDIR = radeon
+endif
+
+if HAVE_AMDGPU
+AMDGPU_SUBDIR = amdgpu
+endif
+
+if HAVE_OMAP
+OMAP_SUBDIR = omap
+endif
+
+if HAVE_EXYNOS
+EXYNOS_SUBDIR = exynos
+endif
+
+if HAVE_FREEDRENO
+FREEDRENO_SUBDIR = freedreno
+endif
+
+if HAVE_TEGRA
+TEGRA_SUBDIR = tegra
+endif
+
+if HAVE_VC4
+VC4_SUBDIR = vc4
+endif
+
+if HAVE_ROCKCHIP
+ROCKCHIP_SUBDIR = rockchip
+endif
+
+if HAVE_MEDIATEK
+MEDIATEK_SUBDIR = mediatek
+endif
+
+if BUILD_MANPAGES
+if HAVE_MANPAGES_STYLESHEET
+MAN_SUBDIR = man
+endif
+endif
+
+SUBDIRS = \
+ . \
+ $(LIBKMS_SUBDIR) \
+ $(INTEL_SUBDIR) \
+ $(NOUVEAU_SUBDIR) \
+ $(RADEON_SUBDIR) \
+ $(AMDGPU_SUBDIR) \
+ $(OMAP_SUBDIR) \
+ $(EXYNOS_SUBDIR) \
+ $(FREEDRENO_SUBDIR) \
+ $(TEGRA_SUBDIR) \
+ $(VC4_SUBDIR) \
+ $(ROCKCHIP_SUBDIR) \
+ $(MEDIATEK_SUBDIR) \
+ tests \
+ $(MAN_SUBDIR)
+
+libdrm_la_LTLIBRARIES = libdrm.la
+libdrm_ladir = $(libdir)
+libdrm_la_LDFLAGS = -version-number 2:4:0 -no-undefined
+libdrm_la_LIBADD = @CLOCK_LIB@ -lm
+
+libdrm_la_CPPFLAGS = -I$(top_srcdir)/include/drm
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ $(VALGRIND_CFLAGS)
+
+libdrm_la_SOURCES = $(LIBDRM_FILES)
+
+libdrmincludedir = ${includedir}
+libdrminclude_HEADERS = $(LIBDRM_H_FILES)
+
+klibdrmincludedir = ${includedir}/libdrm
+klibdrminclude_HEADERS = $(LIBDRM_INCLUDE_H_FILES)
+
+if HAVE_VMWGFX
+klibdrminclude_HEADERS += $(LIBDRM_INCLUDE_VMWGFX_H_FILES)
+endif
+
+
+copy-headers :
+ cp -r $(kernel_source)/include/uapi/drm/*.h $(top_srcdir)/include/drm/
+
+commit-headers : copy-headers
+ git add include/drm/*.h
+ git commit -am "Copy headers from kernel $$(GIT_DIR=$(kernel_source)/.git git describe)"
diff --git a/chromium/third_party/libdrm/src/Makefile.sources b/chromium/third_party/libdrm/src/Makefile.sources
new file mode 100644
index 00000000000..8c7ddace01f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/Makefile.sources
@@ -0,0 +1,42 @@
+LIBDRM_FILES := \
+ xf86drm.c \
+ xf86drmHash.c \
+ xf86drmHash.h \
+ xf86drmRandom.c \
+ xf86drmRandom.h \
+ xf86drmSL.c \
+ xf86drmMode.c \
+ xf86atomic.h \
+ libdrm_macros.h \
+ libdrm_lists.h \
+ util_double_list.h \
+ util_math.h
+
+LIBDRM_H_FILES := \
+ xf86drm.h \
+ xf86drmMode.h
+
+LIBDRM_INCLUDE_H_FILES := \
+ include/drm/drm.h \
+ include/drm/drm_fourcc.h \
+ include/drm/drm_mode.h \
+ include/drm/drm_sarea.h \
+ include/drm/evdi_drm.h \
+ include/drm/i915_drm.h \
+ include/drm/mach64_drm.h \
+ include/drm/mga_drm.h \
+ include/drm/nouveau_drm.h \
+ include/drm/qxl_drm.h \
+ include/drm/r128_drm.h \
+ include/drm/radeon_drm.h \
+ include/drm/amdgpu_drm.h \
+ include/drm/savage_drm.h \
+ include/drm/sis_drm.h \
+ include/drm/tegra_drm.h \
+ include/drm/vc4_drm.h \
+ include/drm/vgem_drm.h \
+ include/drm/via_drm.h \
+ include/drm/virtgpu_drm.h
+
+LIBDRM_INCLUDE_VMWGFX_H_FILES := \
+ include/drm/vmwgfx_drm.h
diff --git a/chromium/third_party/libdrm/src/PRESUBMIT.cfg b/chromium/third_party/libdrm/src/PRESUBMIT.cfg
new file mode 100644
index 00000000000..9b0d5a48ce8
--- /dev/null
+++ b/chromium/third_party/libdrm/src/PRESUBMIT.cfg
@@ -0,0 +1,3 @@
+[Hook Overrides]
+cros_license_check: false
+tab_check: false
diff --git a/chromium/third_party/libdrm/src/README b/chromium/third_party/libdrm/src/README
new file mode 100644
index 00000000000..603a1c10a63
--- /dev/null
+++ b/chromium/third_party/libdrm/src/README
@@ -0,0 +1,41 @@
+libdrm - userspace library for drm
+
+This is libdrm, a userspace library for accessing the DRM, direct
+rendering manager, on Linux, BSD and other operating systems that
+support the ioctl interface. The library provides wrapper functions
+for the ioctls to avoid exposing the kernel interface directly, and
+for chipsets with a drm memory manager, support for tracking relocations
+and buffers. libdrm is a low-level library, typically used by
+graphics drivers such as the Mesa DRI drivers, the X drivers, libva
+and similar projects. New functionality in the kernel DRM drivers
+typically requires a new libdrm, but a new libdrm will always work
+with an older kernel.
+
+
+Compiling
+---------
+
+libdrm is a standard autotools package and follows the normal
+configure, build and install steps. The first step is to configure
+the package, which is done by running the configure shell script:
+
+ ./configure
+
+By default, libdrm will install into the /usr/local/ prefix. If you
+want to install this DRM to replace your system copy, pass
+--prefix=/usr and --exec-prefix=/ to configure. If you are building
+libdrm from a git checkout, you first need to run the autogen.sh
+script. You can pass any options to autogen.sh that you would
+otherwise pass to configure, or you can just re-run configure with the
+options you need once autogen.sh finishes.
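
(An illustrative autogen.sh invocation matching the prefix advice above: ./autogen.sh --prefix=/usr --exec-prefix=/ )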
+
+The next step is to build libdrm:
+
+ make
+
+and once make finishes successfully, install the package using
+
+ make install
+
+If you are installing into a system location, you will need to be root to
+perform the install step.
diff --git a/chromium/third_party/libdrm/src/RELEASING b/chromium/third_party/libdrm/src/RELEASING
new file mode 100644
index 00000000000..62c5be9fafe
--- /dev/null
+++ b/chromium/third_party/libdrm/src/RELEASING
@@ -0,0 +1,65 @@
+The release criteria for libdrm are essentially "if you need a release,
+make one". There is no designated release engineer or maintainer.
+Anybody is free to make a release if there's a certain feature or bug
+fix they need in a released version of libdrm.
+
+When new ioctl definitions are merged into drm-next, we will add
+support to libdrm, at which point we typically create a new release.
+However, this is up to whoever is driving the feature in question.
+
+Follow these steps to release a new version of libdrm:
+
+ 1) Ensure that there are no local, uncommitted/unpushed
+ modifications. You're probably in a good state if both "git diff
+ HEAD" and "git log master..origin/master" give no output.
+
+ 2) Bump the version number in configure.ac. We seem to have settled
+ for 2.4.x as the versioning scheme for libdrm, so just bump the
+ micro version.
+
+ 3) Run autoconf and then re-run ./configure so the build system
+ picks up the new version number.
+
+ 4) (optional step, release.sh will make distcheck for you, but it can be
+ heartwarming to verify that make distcheck passes)
+
+ Verify that the code passes "make distcheck". Running "make
+ distcheck" should result in no warnings or errors and end with a
+ message of the form:
+
+ =============================================
+ libdrm-X.Y.Z archives ready for distribution:
+ libdrm-X.Y.Z.tar.gz
+ libdrm-X.Y.Z.tar.bz2
+ =============================================
+
+ Make sure that the version number reported by distcheck and in
+ the tarball names matches the number you bumped to in configure.ac.
+
+ 5) Commit the configure.ac change and make an annotated tag for that
+ commit with the version number of the release as the name and a
+ message of "libdrm X.Y.Z". For example, for the 2.4.16 release
+ the command is:
+
+ git tag -a 2.4.16 -m "libdrm 2.4.16"
+
+ 6) Push the commit and tag by saying
+
+ git push --tags origin master
+
+ assuming the remote for the upstream libdrm repo is called origin.
+
+ 7) Use the release.sh script from the xorg/util/modular repo to
+ upload the tarballs to the freedesktop.org download area and
+ create an announce email template. The script takes one argument:
+ the path to the libdrm checkout. So, if a checkout of modular is
+ at the same level as the libdrm repo:
+
+ ./modular/release.sh libdrm
+
+ This copies the two tarballs to freedesktop.org and creates
+ libdrm-2.4.16.announce which has a detailed summary of the
+ changes, links to the tarballs, MD5 and SHA1 sums and pre-filled
+ out email headers. Fill out the blank between the email headers
+ and the list of changes with a brief message of what changed or
+ what prompted this release. Send out the email and you're done!
diff --git a/chromium/third_party/libdrm/src/amdgpu/Android.mk b/chromium/third_party/libdrm/src/amdgpu/Android.mk
new file mode 100644
index 00000000000..57c53a7958c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/Android.mk
@@ -0,0 +1,18 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Import variables LIBDRM_AMDGPU_FILES, LIBDRM_AMDGPU_H_FILES
+include $(LOCAL_PATH)/Makefile.sources
+
+LOCAL_MODULE := libdrm_amdgpu
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES := $(patsubst %.h, , $(LIBDRM_AMDGPU_FILES))
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+
+LOCAL_CFLAGS := \
+ -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/chromium/third_party/libdrm/src/amdgpu/Makefile.am b/chromium/third_party/libdrm/src/amdgpu/Makefile.am
new file mode 100644
index 00000000000..cf7bc1ba88d
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/Makefile.am
@@ -0,0 +1,47 @@
+# Copyright © 2008 Jérôme Glisse
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+# Jérôme Glisse <glisse@freedesktop.org>
+
+include Makefile.sources
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ $(PTHREADSTUBS_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_amdgpu_la_LTLIBRARIES = libdrm_amdgpu.la
+libdrm_amdgpu_ladir = $(libdir)
+libdrm_amdgpu_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libdrm_amdgpu_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
+
+libdrm_amdgpu_la_SOURCES = $(LIBDRM_AMDGPU_FILES)
+
+libdrm_amdgpuincludedir = ${includedir}/libdrm
+libdrm_amdgpuinclude_HEADERS = $(LIBDRM_AMDGPU_H_FILES)
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm_amdgpu.pc
+
+TESTS = amdgpu-symbol-check
+EXTRA_DIST = $(TESTS)
diff --git a/chromium/third_party/libdrm/src/amdgpu/Makefile.sources b/chromium/third_party/libdrm/src/amdgpu/Makefile.sources
new file mode 100644
index 00000000000..0c0b9a93a60
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/Makefile.sources
@@ -0,0 +1,14 @@
+LIBDRM_AMDGPU_FILES := \
+ amdgpu_bo.c \
+ amdgpu_cs.c \
+ amdgpu_device.c \
+ amdgpu_gpu_info.c \
+ amdgpu_internal.h \
+ amdgpu_vamgr.c \
+ util_hash.c \
+ util_hash.h \
+ util_hash_table.c \
+ util_hash_table.h
+
+LIBDRM_AMDGPU_H_FILES := \
+ amdgpu.h
diff --git a/chromium/third_party/libdrm/src/amdgpu/amdgpu-symbol-check b/chromium/third_party/libdrm/src/amdgpu/amdgpu-symbol-check
new file mode 100755
index 00000000000..648db9b6c86
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/amdgpu-symbol-check
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# The following symbols (past the first five) are taken from the public headers.
+# A list of the latter should be available in Makefile.am/libdrm_amdgpuinclude_HEADERS
+
+FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_amdgpu.so} | awk '{print $3}' | while read func; do
+( grep -q "^$func$" || echo $func ) <<EOF
+__bss_start
+_edata
+_end
+_fini
+_init
+amdgpu_bo_alloc
+amdgpu_bo_cpu_map
+amdgpu_bo_cpu_unmap
+amdgpu_bo_export
+amdgpu_bo_free
+amdgpu_bo_import
+amdgpu_bo_list_create
+amdgpu_bo_list_destroy
+amdgpu_bo_list_update
+amdgpu_bo_query_info
+amdgpu_bo_set_metadata
+amdgpu_bo_va_op
+amdgpu_bo_wait_for_idle
+amdgpu_create_bo_from_user_mem
+amdgpu_cs_create_semaphore
+amdgpu_cs_ctx_create
+amdgpu_cs_ctx_free
+amdgpu_cs_destroy_semaphore
+amdgpu_cs_query_fence_status
+amdgpu_cs_query_reset_state
+amdgpu_cs_signal_semaphore
+amdgpu_cs_submit
+amdgpu_cs_wait_semaphore
+amdgpu_device_deinitialize
+amdgpu_device_initialize
+amdgpu_query_buffer_size_alignment
+amdgpu_query_crtc_from_id
+amdgpu_query_firmware_version
+amdgpu_query_gds_info
+amdgpu_query_gpu_info
+amdgpu_query_heap_info
+amdgpu_query_hw_ip_count
+amdgpu_query_hw_ip_info
+amdgpu_query_info
+amdgpu_read_mm_registers
+amdgpu_va_range_alloc
+amdgpu_va_range_free
+amdgpu_va_range_query
+EOF
+done)
+
+test ! -n "$FUNCS" || echo $FUNCS
+test ! -n "$FUNCS"
diff --git a/chromium/third_party/libdrm/src/amdgpu/amdgpu.h b/chromium/third_party/libdrm/src/amdgpu/amdgpu.h
new file mode 100644
index 00000000000..5d5a2c67f2a
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/amdgpu.h
@@ -0,0 +1,1248 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * \file amdgpu.h
+ *
+ * Declare public libdrm_amdgpu API
+ *
+ * This file defines the API exposed by the libdrm_amdgpu library.
+ * Users who want to use libdrm_amdgpu functionality must include
+ * this file.
+ *
+ */
+#ifndef _AMDGPU_H_
+#define _AMDGPU_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+struct drm_amdgpu_info_hw_ip;
+
+/*--------------------------------------------------------------------------*/
+/* --------------------------- Defines ------------------------------------ */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Define max. number of Command Buffers (IB) which can be sent to a single
+ * hardware IP to accommodate CE/DE requirements
+ *
+ * \sa amdgpu_cs_ib_info
+*/
+#define AMDGPU_CS_MAX_IBS_PER_SUBMIT 4
+
+/**
+ * Special timeout value meaning that the timeout is infinite.
+ */
+#define AMDGPU_TIMEOUT_INFINITE 0xffffffffffffffffull
+
+/**
+ * Used in amdgpu_cs_query_fence_status(), meaning that the given timeout
+ * is absolute.
+ */
+#define AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE (1 << 0)
+
+/*--------------------------------------------------------------------------*/
+/* ----------------------------- Enums ------------------------------------ */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Enum describing possible handle types
+ *
+ * \sa amdgpu_bo_import, amdgpu_bo_export
+ *
+*/
+enum amdgpu_bo_handle_type {
+ /** GEM flink name (needs DRM authentication, used by DRI2) */
+ amdgpu_bo_handle_type_gem_flink_name = 0,
+
+ /** KMS handle which is used by all driver ioctls */
+ amdgpu_bo_handle_type_kms = 1,
+
+ /** DMA-buf fd handle */
+ amdgpu_bo_handle_type_dma_buf_fd = 2
+};
+
+/** Define known types of GPU VM VA ranges */
+enum amdgpu_gpu_va_range
+{
+ /** Allocate from "normal"/general range */
+ amdgpu_gpu_va_range_general = 0
+};
+
+/*--------------------------------------------------------------------------*/
+/* -------------------------- Datatypes ----------------------------------- */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Define opaque pointer to context associated with fd.
+ * This context will be returned as the result of
+ * "initialize" function and should be pass as the first
+ * parameter to any API call
+ */
+typedef struct amdgpu_device *amdgpu_device_handle;
+
+/**
+ * Define GPU Context type as pointer to opaque structure
+ * Example of GPU Context is the "rendering" context associated
+ * with OpenGL context (glCreateContext)
+ */
+typedef struct amdgpu_context *amdgpu_context_handle;
+
+/**
+ * Define handle for amdgpu resources: buffer, GDS, etc.
+ */
+typedef struct amdgpu_bo *amdgpu_bo_handle;
+
+/**
+ * Define handle for list of BOs
+ */
+typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;
+
+/**
+ * Define handle to be used to work with VA allocated ranges
+ */
+typedef struct amdgpu_va *amdgpu_va_handle;
+
+/**
+ * Define handle for semaphore
+ */
+typedef struct amdgpu_semaphore *amdgpu_semaphore_handle;
+
+/*--------------------------------------------------------------------------*/
+/* -------------------------- Structures ---------------------------------- */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Structure describing memory allocation request
+ *
+ * \sa amdgpu_bo_alloc()
+ *
+*/
+struct amdgpu_bo_alloc_request {
+ /** Allocation request. It must be aligned correctly. */
+ uint64_t alloc_size;
+
+ /**
+ * It may be required to have some specific alignment requirements
+ * for physical back-up storage (e.g. for displayable surface).
+ * If 0 there is no special alignment requirement
+ */
+ uint64_t phys_alignment;
+
+ /**
+ * UMD should specify where to allocate memory and how it
+ * will be accessed by the CPU.
+ */
+ uint32_t preferred_heap;
+
+ /** Additional flags passed on allocation */
+ uint64_t flags;
+};
+
+/**
+ * Special UMD specific information associated with buffer.
+ *
+ * It may be necessary to pass some buffer characteristics as part
+ * of buffer sharing. Such information is defined by the UMD and is
+ * opaque to libdrm_amdgpu as well as to the kernel driver.
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info,
+ * amdgpu_bo_import(), amdgpu_bo_export
+ *
+*/
+struct amdgpu_bo_metadata {
+ /** Special flag associated with surface */
+ uint64_t flags;
+
+ /**
+ * ASIC-specific tiling information (also used by DCE).
+ * The encoding is defined by the AMDGPU_TILING_* definitions.
+ */
+ uint64_t tiling_info;
+
+ /** Size of metadata associated with the buffer, in bytes. */
+ uint32_t size_metadata;
+
+ /** UMD specific metadata. Opaque for kernel */
+ uint32_t umd_metadata[64];
+};
+
+/**
+ * Structure describing allocated buffer. Client may need
+ * to query such information as part of 'sharing' buffers mechanism
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
+ * amdgpu_bo_import(), amdgpu_bo_export()
+*/
+struct amdgpu_bo_info {
+ /** Allocated memory size */
+ uint64_t alloc_size;
+
+ /**
+ * It may be required to have some specific alignment requirements
+ * for physical back-up storage.
+ */
+ uint64_t phys_alignment;
+
+ /** Heap where to allocate memory. */
+ uint32_t preferred_heap;
+
+ /** Additional allocation flags. */
+ uint64_t alloc_flags;
+
+ /** Metadata associated with buffer if any. */
+ struct amdgpu_bo_metadata metadata;
+};
+
+/**
+ * Structure with information about "imported" buffer
+ *
+ * \sa amdgpu_bo_import()
+ *
+ */
+struct amdgpu_bo_import_result {
+ /** Handle of memory/buffer to use */
+ amdgpu_bo_handle buf_handle;
+
+ /** Buffer size */
+ uint64_t alloc_size;
+};
+
+/**
+ *
+ * Structure to describe GDS partitioning information.
+ * \note OA and GWS resources are associated with GDS partition
+ *
+ * \sa amdgpu_gpu_resource_query_gds_info
+ *
+*/
+struct amdgpu_gds_resource_info {
+ uint32_t gds_gfx_partition_size;
+ uint32_t compute_partition_size;
+ uint32_t gds_total_size;
+ uint32_t gws_per_gfx_partition;
+ uint32_t gws_per_compute_partition;
+ uint32_t oa_per_gfx_partition;
+ uint32_t oa_per_compute_partition;
+};
+
+/**
+ * Structure describing CS fence
+ *
+ * \sa amdgpu_cs_query_fence_status(), amdgpu_cs_request, amdgpu_cs_submit()
+ *
+*/
+struct amdgpu_cs_fence {
+
+ /** In which context IB was sent to execution */
+ amdgpu_context_handle context;
+
+ /** To which HW IP type the fence belongs */
+ uint32_t ip_type;
+
+ /** IP instance index if there are several IPs of the same type. */
+ uint32_t ip_instance;
+
+ /** Ring index of the HW IP */
+ uint32_t ring;
+
+ /** Specify fence for which we need to check submission status.*/
+ uint64_t fence;
+};
+
+/**
+ * Structure describing IB
+ *
+ * \sa amdgpu_cs_request, amdgpu_cs_submit()
+ *
+*/
+struct amdgpu_cs_ib_info {
+ /** Special flags */
+ uint64_t flags;
+
+ /** Virtual MC address of the command buffer */
+ uint64_t ib_mc_address;
+
+ /**
+ * Size of Command Buffer to be submitted.
+ * - The size is in units of dwords (4 bytes).
+ * - Could be 0
+ */
+ uint32_t size;
+};
+
+/**
+ * Structure describing fence information
+ *
+ * \sa amdgpu_cs_request, amdgpu_cs_query_fence,
+ * amdgpu_cs_submit(), amdgpu_cs_query_fence_status()
+*/
+struct amdgpu_cs_fence_info {
+ /** buffer object for the fence */
+ amdgpu_bo_handle handle;
+
+ /** fence offset in the unit of sizeof(uint64_t) */
+ uint64_t offset;
+};
+
+/**
+ * Structure describing submission request
+ *
+ * \note We could have several IBs as packet. e.g. CE, CE, DE case for gfx
+ *
+ * \sa amdgpu_cs_submit()
+*/
+struct amdgpu_cs_request {
+ /** Specify flags with additional information */
+ uint64_t flags;
+
+ /** Specify HW IP block type to which to send the IB. */
+ unsigned ip_type;
+
+ /** IP instance index if there are several IPs of the same type. */
+ unsigned ip_instance;
+
+ /**
+ * Specify ring index of the IP. We could have several rings
+ * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
+ */
+ uint32_t ring;
+
+ /**
+ * List handle with resources used by this request.
+ */
+ amdgpu_bo_list_handle resources;
+
+ /**
+ * Number of dependencies this Command submission needs to
+ * wait for before starting execution.
+ */
+ uint32_t number_of_dependencies;
+
+ /**
+ * Array of dependencies which need to be met before
+ * execution can start.
+ */
+ struct amdgpu_cs_fence *dependencies;
+
+ /** Number of IBs to submit in the field ibs. */
+ uint32_t number_of_ibs;
+
+ /**
+ * IBs to submit. Those IBs will be submit together as single entity
+ */
+ struct amdgpu_cs_ib_info *ibs;
+
+ /**
+ * The returned sequence number for the command submission
+ */
+ uint64_t seq_no;
+
+ /**
+ * The fence information
+ */
+ struct amdgpu_cs_fence_info fence_info;
+};
+
+/**
+ * Structure which provide information about GPU VM MC Address space
+ * alignments requirements
+ *
+ * \sa amdgpu_query_buffer_size_alignment
+ */
+struct amdgpu_buffer_size_alignments {
+ /** Size alignment requirement for allocation in
+ * local memory */
+ uint64_t size_local;
+
+ /**
+ * Size alignment requirement for allocation in remote memory
+ */
+ uint64_t size_remote;
+};
+
+/**
+ * Structure which provide information about heap
+ *
+ * \sa amdgpu_query_heap_info()
+ *
+ */
+struct amdgpu_heap_info {
+ /** Theoretical max. available memory in the given heap */
+ uint64_t heap_size;
+
+ /**
+ * Number of bytes allocated in the heap. This includes all processes
+ * and private allocations in the kernel. It changes when new buffers
+ * are allocated, freed, and moved. It cannot be larger than
+ * heap_size.
+ */
+ uint64_t heap_usage;
+
+ /**
+ * Theoretical possible max. size of buffer which
+ * could be allocated in the given heap
+ */
+ uint64_t max_allocation;
+};
+
+/**
+ * Describe GPU h/w info needed for correct UMD initialization
+ *
+ * \sa amdgpu_query_gpu_info()
+*/
+struct amdgpu_gpu_info {
+ /** Asic id */
+ uint32_t asic_id;
+ /** Chip revision */
+ uint32_t chip_rev;
+ /** Chip external revision */
+ uint32_t chip_external_rev;
+ /** Family ID */
+ uint32_t family_id;
+ /** Special flags */
+ uint64_t ids_flags;
+ /** max engine clock*/
+ uint64_t max_engine_clk;
+ /** max memory clock */
+ uint64_t max_memory_clk;
+ /** number of shader engines */
+ uint32_t num_shader_engines;
+ /** number of shader arrays per engine */
+ uint32_t num_shader_arrays_per_engine;
+ /** Number of available good shader pipes */
+ uint32_t avail_quad_shader_pipes;
+ /** Max. number of shader pipes (including good and bad pipes) */
+ uint32_t max_quad_shader_pipes;
+ /** Number of parameter cache entries per shader quad pipe */
+ uint32_t cache_entries_per_quad_pipe;
+ /** Number of available graphics context */
+ uint32_t num_hw_gfx_contexts;
+ /** Number of render backend pipes */
+ uint32_t rb_pipes;
+ /** Enabled render backend pipe mask */
+ uint32_t enabled_rb_pipes_mask;
+ /** Frequency of GPU Counter */
+ uint32_t gpu_counter_freq;
+ /** CC_RB_BACKEND_DISABLE.BACKEND_DISABLE per SE */
+ uint32_t backend_disable[4];
+ /** Value of MC_ARB_RAMCFG register*/
+ uint32_t mc_arb_ramcfg;
+ /** Value of GB_ADDR_CONFIG */
+ uint32_t gb_addr_cfg;
+ /** Values of the GB_TILE_MODE0..31 registers */
+ uint32_t gb_tile_mode[32];
+ /** Values of GB_MACROTILE_MODE0..15 registers */
+ uint32_t gb_macro_tile_mode[16];
+ /** Value of PA_SC_RASTER_CONFIG register per SE */
+ uint32_t pa_sc_raster_cfg[4];
+ /** Value of PA_SC_RASTER_CONFIG_1 register per SE */
+ uint32_t pa_sc_raster_cfg1[4];
+ /* CU info */
+ uint32_t cu_active_number;
+ uint32_t cu_ao_mask;
+ uint32_t cu_bitmap[4][4];
+ /* video memory type info*/
+ uint32_t vram_type;
+ /* video memory bit width*/
+ uint32_t vram_bit_width;
+ /** constant engine ram size*/
+ uint32_t ce_ram_size;
+ /* vce harvesting instance */
+ uint32_t vce_harvest_config;
+ /* PCI revision ID */
+ uint32_t pci_rev_id;
+};
+
+
+/*--------------------------------------------------------------------------*/
+/*------------------------- Functions --------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Initialization / Cleanup
+ *
+*/
+
+/**
+ *
+ * \param fd - \c [in] File descriptor for AMD GPU device
+ * received previously as the result of
+ * e.g. drmOpen() call.
+ * For legacy fd type, the DRI2/DRI3
+ * authentication should be done before
+ * calling this function.
+ * \param major_version - \c [out] Major version of library. It is assumed
+ * that adding new functionality will cause
+ * increase in major version
+ * \param minor_version - \c [out] Minor version of library
+ * \param device_handle - \c [out] Pointer to opaque context which should
+ * be passed as the first parameter on each
+ * API call
+ *
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ *
+ * \sa amdgpu_device_deinitialize()
+*/
+int amdgpu_device_initialize(int fd,
+ uint32_t *major_version,
+ uint32_t *minor_version,
+ amdgpu_device_handle *device_handle);
+
+/**
+ *
+ * When access to the library is no longer needed, this special
+ * function must be called, giving an opportunity to clean up any
+ * resources if needed.
+ *
+ * \param device_handle - \c [in] Context associated with file
+ * descriptor for AMD GPU device
+ * received previously as the
+ * result e.g. of drmOpen() call.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_device_initialize()
+ *
+*/
+int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
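
[Illustrative sketch, not part of the upstream header: a minimal init/deinit round trip. The render-node path is an assumption; real code would discover the device, e.g. via drmOpen().]

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include "amdgpu.h"

    int main(void)
    {
        uint32_t major, minor;
        amdgpu_device_handle dev;
        int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed device node */

        if (fd < 0)
            return 1;
        if (amdgpu_device_initialize(fd, &major, &minor, &dev) == 0) {
            printf("libdrm_amdgpu interface %u.%u\n", major, minor);
            amdgpu_device_deinitialize(dev);
        }
        close(fd);
        return 0;
    }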
+
+/*
+ * Memory Management
+ *
+*/
+
+/**
+ * Allocate memory to be used by UMD for GPU related operations
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param alloc_buffer - \c [in] Pointer to the structure describing an
+ * allocation request
+ * \param buf_handle - \c [out] Allocated buffer handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_free()
+*/
+int amdgpu_bo_alloc(amdgpu_device_handle dev,
+ struct amdgpu_bo_alloc_request *alloc_buffer,
+ amdgpu_bo_handle *buf_handle);
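
[Sketch of a minimal allocation request; AMDGPU_GEM_DOMAIN_GTT comes from include/drm/amdgpu_drm.h, and the one-page size/alignment is illustrative.]

    #include <string.h>
    #include "amdgpu.h"
    #include "amdgpu_drm.h"

    static int alloc_page(amdgpu_device_handle dev, amdgpu_bo_handle *bo)
    {
        struct amdgpu_bo_alloc_request req;

        memset(&req, 0, sizeof(req));
        req.alloc_size = 4096;      /* illustrative: one page */
        req.phys_alignment = 4096;
        req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
        return amdgpu_bo_alloc(dev, &req, bo); /* pair with amdgpu_bo_free() */
    }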
+
+/**
+ * Associate opaque data with buffer to be queried by another UMD
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle
+ * \param info - \c [in] Metadata to associate with buffer
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+*/
+int amdgpu_bo_set_metadata(amdgpu_bo_handle buf_handle,
+ struct amdgpu_bo_metadata *info);
+
+/**
+ * Query buffer information including metadata previously associated with
+ * buffer.
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle
+ * \param info - \c [out] Structure describing buffer
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
+*/
+int amdgpu_bo_query_info(amdgpu_bo_handle buf_handle,
+ struct amdgpu_bo_info *info);
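
[Sketch of round-tripping UMD metadata through the set/query pair above; the payload value is arbitrary.]

    #include <string.h>
    #include "amdgpu.h"

    static int tag_and_query(amdgpu_bo_handle bo)
    {
        struct amdgpu_bo_metadata md;
        struct amdgpu_bo_info info;
        int r;

        memset(&md, 0, sizeof(md));
        md.size_metadata = sizeof(uint32_t);
        md.umd_metadata[0] = 0xdeadbeef;   /* opaque to the kernel */
        r = amdgpu_bo_set_metadata(bo, &md);
        if (r)
            return r;
        return amdgpu_bo_query_info(bo, &info); /* info.metadata holds it */
    }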
+
+/**
+ * Allow others to get access to buffer
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle
+ * \param type - \c [in] Type of handle requested
+ * \param shared_handle - \c [out] Special "shared" handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_import()
+ *
+*/
+int amdgpu_bo_export(amdgpu_bo_handle buf_handle,
+ enum amdgpu_bo_handle_type type,
+ uint32_t *shared_handle);
+
+/**
+ * Request access to "shared" buffer
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param type - \c [in] Type of handle requested
+ * \param shared_handle - \c [in] Shared handle received as result "import"
+ * operation
+ * \param output - \c [out] Pointer to structure with information
+ * about imported buffer
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note Buffer must be "imported" only using new "fd" (different from
+ * one used by "exporter").
+ *
+ * \sa amdgpu_bo_export()
+ *
+*/
+int amdgpu_bo_import(amdgpu_device_handle dev,
+ enum amdgpu_bo_handle_type type,
+ uint32_t shared_handle,
+ struct amdgpu_bo_import_result *output);
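
[Sketch of sharing a BO across device handles through a dma-buf fd, in line with the note above that the importer must use a different fd than the exporter.]

    #include "amdgpu.h"

    static int share_bo(amdgpu_bo_handle exported_bo,
                        amdgpu_device_handle importer,
                        struct amdgpu_bo_import_result *out)
    {
        uint32_t shared;   /* the dma-buf fd travels as a uint32_t */
        int r;

        r = amdgpu_bo_export(exported_bo, amdgpu_bo_handle_type_dma_buf_fd,
                             &shared);
        if (r)
            return r;
        return amdgpu_bo_import(importer, amdgpu_bo_handle_type_dma_buf_fd,
                                shared, out);
    }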
+
+/**
+ * Request GPU access to user allocated memory e.g. via "malloc"
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param cpu - [in] CPU address of user allocated memory which we
+ * want to map to GPU address space (make GPU accessible)
+ * (This address must be correctly aligned).
+ * \param size - [in] Size of allocation (must be correctly aligned)
+ * \param buf_handle - [out] Buffer handle for the userptr memory; it can
+ * be referenced on submission and used in other operations.
+ *
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note
+ * This call doesn't guarantee that such memory will be persistently
+ * "locked" / make non-pageable. The purpose of this call is to provide
+ * opportunity for GPU get access to this resource during submission.
+ *
+ * The maximum amount of memory which can be mapped in this call depends
+ * on whether overcommit is disabled. If overcommit is disabled, the max.
+ * amount of memory to be pinned is limited by the remaining "free" portion of the
+ * total amount of memory which can be locked simultaneously (the "GART" size).
+ *
+ * Supported (theoretical) max. size of mapping is restricted only by
+ * "GART" size.
+ *
+ * It is the responsibility of the caller to correctly specify access rights
+ * on VA assignment.
+*/
+int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
+ void *cpu, uint64_t size,
+ amdgpu_bo_handle *buf_handle);
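
[Sketch assuming C11 aligned_alloc() for the alignment this call requires; the 1 MiB size is illustrative and subject to the GART limits described above.]

    #include <stdlib.h>
    #include "amdgpu.h"

    static int wrap_user_mem(amdgpu_device_handle dev, amdgpu_bo_handle *bo)
    {
        uint64_t size = 1ull << 20;            /* illustrative: 1 MiB */
        void *cpu = aligned_alloc(4096, size); /* page aligned */

        if (!cpu)
            return -1;
        return amdgpu_create_bo_from_user_mem(dev, cpu, size, bo);
    }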
+
+/**
+ * Free previously allocated memory
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle to free
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note In the case of memory shared between different applications, all
+ * resources will be "physically" freed only once all such applications
+ * have been terminated
+ * \note It is the UMD's responsibility to 'free' the buffer only when there
+ * is no more GPU access
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
+ *
+*/
+int amdgpu_bo_free(amdgpu_bo_handle buf_handle);
+
+/**
+ * Request CPU access to GPU accessible memory
+ *
+ * \param buf_handle - \c [in] Buffer handle
+ * \param cpu - \c [out] CPU address to be used for access
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_cpu_unmap()
+ *
+*/
+int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu);
+
+/**
+ * Release CPU access to GPU memory
+ *
+ * \param buf_handle - \c [in] Buffer handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_cpu_map()
+ *
+*/
+int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
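
[Sketch of the map/modify/unmap pattern; it assumes the BO is CPU-visible and at least one page in size.]

    #include <string.h>
    #include "amdgpu.h"

    static int clear_first_page(amdgpu_bo_handle bo)
    {
        void *cpu;
        int r = amdgpu_bo_cpu_map(bo, &cpu);

        if (r)
            return r;
        memset(cpu, 0, 4096);   /* assumes >= 4096 bytes were allocated */
        return amdgpu_bo_cpu_unmap(bo);
    }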
+
+/**
+ * Wait until a buffer is not used by the device.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle.
+ * \param timeout_ns - Timeout in nanoseconds.
+ * \param buffer_busy - 0 if buffer is idle, all GPU access was completed
+ * and no GPU access is scheduled.
+ * 1 if GPU access is in flight or scheduled
+ *
+ * \return 0 - on success
+ * <0 - Negative POSIX Error code
+ */
+int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
+ uint64_t timeout_ns,
+ bool *buffer_busy);
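
[Sketch: bounded wait, then report busy/idle; the 1 ms timeout is arbitrary and the return code is ignored for brevity.]

    #include <stdbool.h>
    #include "amdgpu.h"

    static bool busy_after_1ms(amdgpu_bo_handle bo)
    {
        bool busy = true;

        amdgpu_bo_wait_for_idle(bo, 1000000ull /* 1 ms in ns */, &busy);
        return busy;
    }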
+
+/**
+ * Creates a BO list handle for command submission.
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param number_of_resources - \c [in] Number of BOs in the list
+ * \param resources - \c [in] List of BO handles
+ * \param resource_prios - \c [in] Optional priority for each handle
+ * \param result - \c [out] Created BO list handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_destroy()
+*/
+int amdgpu_bo_list_create(amdgpu_device_handle dev,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios,
+ amdgpu_bo_list_handle *result);
+
+/**
+ * Destroys a BO list handle.
+ *
+ * \param handle - \c [in] BO list handle.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create()
+*/
+int amdgpu_bo_list_destroy(amdgpu_bo_list_handle handle);
+
+/**
+ * Update resources for existing BO list
+ *
+ * \param handle - \c [in] BO list handle
+ * \param number_of_resources - \c [in] Number of BOs in the list
+ * \param resources - \c [in] List of BO handles
+ * \param resource_prios - \c [in] Optional priority for each handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_update()
+*/
+int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios);
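
[Sketch: wrap a single BO in a list for submission; passing NULL priorities leaves every entry at the default.]

    #include "amdgpu.h"

    static int list_one_bo(amdgpu_device_handle dev, amdgpu_bo_handle bo,
                           amdgpu_bo_list_handle *list)
    {
        /* NULL resource_prios: default priority for the single entry */
        return amdgpu_bo_list_create(dev, 1, &bo, NULL, list);
    }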
+
+/*
+ * GPU Execution context
+ *
+*/
+
+/**
+ * Create GPU execution Context
+ *
+ * For the purposes of the GPU Scheduler and GPU Robustness extensions it is
+ * necessary to identify rendering/compute contexts. It may also be needed
+ * to associate some specific requirements with such contexts. The kernel
+ * driver guarantees that submissions from the same context will always be
+ * executed in order (first come, first served).
+ *
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param context - \c [out] GPU Context handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_free()
+ *
+*/
+int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
+ amdgpu_context_handle *context);
+
+/**
+ *
+ * Destroy GPU execution context when not needed any more
+ *
+ * \param context - \c [in] GPU Context handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_create()
+ *
+*/
+int amdgpu_cs_ctx_free(amdgpu_context_handle context);
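+
+/* Illustrative lifetime sketch for a GPU execution context (assumed
+ * names):
+ *
+ *     amdgpu_context_handle ctx;
+ *
+ *     if (amdgpu_cs_ctx_create(dev, &ctx) == 0) {
+ *         // submit work with amdgpu_cs_submit(ctx, ...)
+ *         amdgpu_cs_ctx_free(ctx);
+ *     }
+ */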
+
+/**
+ * Query reset state for the specific GPU Context
+ *
+ * \param context - \c [in] GPU Context handle
+ * \param state - \c [out] One of AMDGPU_CTX_*_RESET
+ * \param hangs - \c [out] Number of hangs caused by the context.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_create()
+ *
+*/
+int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
+ uint32_t *state, uint32_t *hangs);
+
+/*
+ * Command Buffers Management
+ *
+*/
+
+/**
+ * Send request to submit command buffers to hardware.
+ *
+ * The kernel driver may use the GPU Scheduler to decide when to physically
+ * send this request to the hardware. Accordingly, the request may be queued
+ * and sent for execution later. The only guarantee is that requests from
+ * the same GPU context to the same ip:ip_instance:ring will be executed in
+ * order.
+ *
+ * The caller can specify the user fence buffer/location with the fence_info
+ * in the cs_request. The sequence number is returned via the 'seq_no'
+ * parameter of the ibs_request structure.
+ *
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param context - \c [in] GPU Context
+ * \param flags - \c [in] Global submission flags
+ * \param ibs_request - \c [in/out] Pointer to submission requests.
+ *                      Several engines/rings may be submitted to
+ *                      simultaneously as one 'atomic' operation
+ * \param number_of_requests - \c [in] Number of submission requests
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note It is required to pass a correct resource list with the buffer
+ *       handles which will be accessed by the command buffers in the
+ *       submission. This allows the kernel driver to correctly implement
+ *       "paging". Failure to do so will have unpredictable results.
+ *
+ * \sa amdgpu_command_buffer_alloc(), amdgpu_command_buffer_free(),
+ * amdgpu_cs_query_fence_status()
+ *
+*/
+int amdgpu_cs_submit(amdgpu_context_handle context,
+ uint64_t flags,
+ struct amdgpu_cs_request *ibs_request,
+ uint32_t number_of_requests);
+
+/**
+ * Query status of Command Buffer Submission
+ *
+ * \param fence - \c [in] Structure describing fence to query
+ * \param timeout_ns - \c [in] Timeout value to wait
+ * \param flags - \c [in] Flags for the query
+ * \param expired - \c [out] If fence expired or not.\n
+ *                         0 - if fence is not expired\n
+ * !0 - otherwise
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note If the UMD only wants to check the operation status and return
+ *       immediately, a timeout value of 0 must be passed. In this case
+ *       success is returned if the submission has completed, otherwise a
+ *       timeout error code is returned.
+ *
+ * \sa amdgpu_cs_submit()
+*/
+int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
+ uint64_t timeout_ns,
+ uint64_t flags,
+ uint32_t *expired);
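+
+/* Illustrative submit-and-wait sketch; the single-IB request and the
+ * previously prepared 'ib_info' and 'bo_list' are assumptions, not a
+ * complete submission:
+ *
+ *     struct amdgpu_cs_request req = {0};
+ *     struct amdgpu_cs_fence fence = {0};
+ *     uint32_t expired = 0;
+ *
+ *     req.ip_type = AMDGPU_HW_IP_GFX;
+ *     req.resources = bo_list;
+ *     req.number_of_ibs = 1;
+ *     req.ibs = &ib_info;
+ *
+ *     if (amdgpu_cs_submit(ctx, 0, &req, 1) == 0) {
+ *         fence.context = ctx;
+ *         fence.ip_type = req.ip_type;
+ *         fence.fence = req.seq_no;            // returned by the submit
+ *         amdgpu_cs_query_fence_status(&fence, AMDGPU_TIMEOUT_INFINITE,
+ *                                      0, &expired);
+ *     }
+ */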
+
+/*
+ * Query / Info API
+ *
+*/
+
+/**
+ * Query allocation size alignments
+ *
+ * The UMD should query the GPU VM MC size alignment requirements to be
+ * able to correctly choose the required allocation size and implement
+ * internal optimizations if needed.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info - \c [out] Pointer to structure to get size alignment
+ * requirements
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
+ struct amdgpu_buffer_size_alignments
+ *info);
+
+/**
+ * Query firmware versions
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param fw_type - \c [in] AMDGPU_INFO_FW_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type.
+ * \param index - \c [in] Index of the engine. (for SDMA and MEC)
+ * \param version - \c [out] Pointer to the "version" return value
+ * \param feature - \c [out] Pointer to the "feature" return value
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
+ unsigned ip_instance, unsigned index,
+ uint32_t *version, uint32_t *feature);
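+
+/* Illustrative query sketch: read the version/feature of the GFX ME
+ * microcode (the choice of fw_type constant is an assumption):
+ *
+ *     uint32_t version, feature;
+ *
+ *     amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME, 0, 0,
+ *                                   &version, &feature);
+ */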
+
+/**
+ * Query the number of HW IP instances of a certain type.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param count - \c [out] Pointer to structure to get information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+*/
+int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
+ uint32_t *count);
+
+/**
+ * Query engine information
+ *
+ * This query allows the UMD to obtain information about the different
+ * engines and their capabilities.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type.
+ * \param info - \c [out] Pointer to structure to get information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+*/
+int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
+ unsigned ip_instance,
+ struct drm_amdgpu_info_hw_ip *info);
+
+/**
+ * Query heap information
+ *
+ * This query allows the UMD to query the potentially available memory
+ * resources and adjust its logic if necessary.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param heap - \c [in] Heap type
+ * \param flags - \c [in] Additional query flags
+ * \param info - \c [out] Pointer to structure to get needed information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_heap_info(amdgpu_device_handle dev, uint32_t heap,
+ uint32_t flags, struct amdgpu_heap_info *info);
+
+/**
+ * Get the CRTC ID from the mode object ID
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param id - \c [in] Mode object ID
+ * \param result - \c [out] Pointer to the CRTC ID
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
+ int32_t *result);
+
+/**
+ * Query GPU H/w Info
+ *
+ * Query hardware specific information
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info - \c [out] Pointer to structure to get needed information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_gpu_info(amdgpu_device_handle dev,
+ struct amdgpu_gpu_info *info);
+
+/**
+ * Query hardware or driver information.
+ *
+ * The return size is query-specific and depends on the "info_id" parameter.
+ * No more than "size" bytes are returned.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info_id - \c [in] AMDGPU_INFO_*
+ * \param size - \c [in] Size of the returned value.
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX error code
+ *
+*/
+int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
+ unsigned size, void *value);
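+
+/* Illustrative sketch: check whether acceleration is working via the
+ * generic info query (this mirrors the check done at device
+ * initialization):
+ *
+ *     uint32_t accel_working = 0;
+ *
+ *     amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING,
+ *                       sizeof(accel_working), &accel_working);
+ */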
+
+/**
+ * Query information about GDS
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param gds_info - \c [out] Pointer to structure to get GDS information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_gds_info(amdgpu_device_handle dev,
+ struct amdgpu_gds_resource_info *gds_info);
+
+/**
+ * Read a set of consecutive memory-mapped registers.
+ * Not all registers are allowed to be read by userspace.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param dword_offset - \c [in] Register offset in dwords
+ * \param count - \c [in] The number of registers to read starting
+ * from the offset
+ * \param instance - \c [in] GRBM_GFX_INDEX selector. It may have other
+ * uses. Set it to 0xffffffff if unsure.
+ * \param flags - \c [in] Flags with additional information.
+ * \param values - \c [out] The pointer to return values.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX error code
+ *
+*/
+int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
+ unsigned count, uint32_t instance, uint32_t flags,
+ uint32_t *values);
+
+/**
+ * Flag to request VA address range in the 32bit address space
+*/
+#define AMDGPU_VA_RANGE_32_BIT 0x1
+
+/**
+ * Allocate virtual address range
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param va_range_type - \c [in] Type of MC VA range from which to allocate
+ * \param size - \c [in] Size of the range. The size must be correctly
+ * aligned; it is the client's responsibility to align it based on the
+ * future usage of the allocated range.
+ * \param va_base_alignment - \c [in] Overwrite the base address alignment
+ * requirement for GPU VM MC virtual address assignment. Must be a multiple
+ * of the size alignments received as 'amdgpu_buffer_size_alignments'.
+ * If 0, the default is used.
+ * \param va_base_required - \c [in] Specific required VA base address.
+ * If 0, the library chooses an available one.
+ * If a non-zero value is passed and that value is already in use, the
+ * corresponding error status is returned.
+ * \param va_base_allocated - \c [out] On return: Allocated VA base to be used
+ * by client.
+ * \param va_range_handle - \c [out] On return: Handle assigned to allocation
+ * \param flags - \c [in] flags for special VA range
+ *
+ * \return 0 on success\n
+ * >0 - AMD specific error code\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note\n
+ * It is the client's responsibility to correctly handle VA assignments and
+ * usage. Neither the kernel driver nor libdrm_amdgpu is able to prevent or
+ * detect wrong VA assignments.
+ *
+ * It is the client's responsibility to correctly handle multi-GPU cases and
+ * to pass the corresponding arrays of all device handles where the
+ * corresponding VA will be used.
+ *
+*/
+int amdgpu_va_range_alloc(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range va_range_type,
+ uint64_t size,
+ uint64_t va_base_alignment,
+ uint64_t va_base_required,
+ uint64_t *va_base_allocated,
+ amdgpu_va_handle *va_range_handle,
+ uint64_t flags);
+
+/**
+ * Free previously allocated virtual address range
+ *
+ *
+ * \param va_range_handle - \c [in] Handle assigned to VA allocation
+ *
+ * \return 0 on success\n
+ * >0 - AMD specific error code\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_va_range_free(amdgpu_va_handle va_range_handle);
+
+/**
+* Query virtual address range
+*
+* The UMD can query the GPU VM range supported by each device to
+* initialize its own VA manager accordingly.
+*
+* \param dev   - [in] Device handle. See #amdgpu_device_initialize()
+* \param type  - \c [in] Type of virtual address range
+* \param start - \c [out] Start of the virtual address range
+* \param end   - \c [out] End of the virtual address range
+*
+* \return 0 on success\n
+* <0 - Negative POSIX Error code
+*
+*/
+
+int amdgpu_va_range_query(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range type,
+ uint64_t *start,
+ uint64_t *end);
+
+/**
+ * VA mapping/unmapping for the buffer object
+ *
+ * \param bo - \c [in] BO handle
+ * \param offset - \c [in] Start offset to map
+ * \param size - \c [in] Size to map
+ * \param addr - \c [in] Start virtual address.
+ * \param flags - \c [in] Supported flags for mapping/unmapping
+ * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+
+int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops);
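+
+/* Illustrative sketch: allocate a VA range and map a BO into it; passing 0
+ * for the alignment (use the default) and for the required base address is
+ * an assumption made for the example:
+ *
+ *     uint64_t va;
+ *     amdgpu_va_handle va_handle;
+ *
+ *     if (amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
+ *                               alloc_size, 0, 0, &va, &va_handle, 0) == 0) {
+ *         amdgpu_bo_va_op(bo, 0, alloc_size, va, 0, AMDGPU_VA_OP_MAP);
+ *         // ... the GPU may access the BO at address 'va' ...
+ *         amdgpu_bo_va_op(bo, 0, alloc_size, va, 0, AMDGPU_VA_OP_UNMAP);
+ *         amdgpu_va_range_free(va_handle);
+ *     }
+ */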
+
+/**
+ * Create semaphore
+ *
+ * \param sem - \c [out] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem);
+
+/**
+ * Signal semaphore
+ *
+ * \param ctx - \c [in] GPU Context
+ * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type
+ * \param ring - \c [in] Specify ring index of the IP
+ * \param sem - \c [in] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem);
+
+/**
+ * Wait semaphore
+ *
+ * \param ctx - \c [in] GPU Context
+ * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type
+ * \param ring - \c [in] Specify ring index of the IP
+ * \param sem - \c [in] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem);
+
+/**
+ * Destroy semaphore
+ *
+ * \param sem - \c [in] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
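+
+/* Illustrative ordering sketch: make ring 1 wait for the work most
+ * recently submitted on ring 0 of the GFX IP (the ring indices are
+ * assumptions):
+ *
+ *     amdgpu_semaphore_handle sem;
+ *
+ *     if (amdgpu_cs_create_semaphore(&sem) == 0) {
+ *         amdgpu_cs_signal_semaphore(ctx, AMDGPU_HW_IP_GFX, 0, 0, sem);
+ *         amdgpu_cs_wait_semaphore(ctx, AMDGPU_HW_IP_GFX, 0, 1, sem);
+ *         // the next submission on ring 1 waits for the signal
+ *         amdgpu_cs_destroy_semaphore(sem);
+ *     }
+ */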
+
+#endif /* #ifdef _AMDGPU_H_ */
diff --git a/chromium/third_party/libdrm/src/amdgpu/amdgpu_bo.c b/chromium/third_party/libdrm/src/amdgpu/amdgpu_bo.c
new file mode 100644
index 00000000000..d30fd1e7166
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/amdgpu_bo.c
@@ -0,0 +1,705 @@
+/*
+ * Copyright © 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "util_hash_table.h"
+#include "util_math.h"
+
+static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
+ uint32_t handle)
+{
+ struct drm_gem_close args = {};
+
+ args.handle = handle;
+ drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
+}
+
+drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
+{
+ /* Remove the buffer from the hash tables. */
+ pthread_mutex_lock(&bo->dev->bo_table_mutex);
+ util_hash_table_remove(bo->dev->bo_handles,
+ (void*)(uintptr_t)bo->handle);
+ if (bo->flink_name) {
+ util_hash_table_remove(bo->dev->bo_flink_names,
+ (void*)(uintptr_t)bo->flink_name);
+ }
+ pthread_mutex_unlock(&bo->dev->bo_table_mutex);
+
+ /* Release CPU access. */
+ if (bo->cpu_map_count > 0) {
+ bo->cpu_map_count = 1;
+ amdgpu_bo_cpu_unmap(bo);
+ }
+
+ amdgpu_close_kms_handle(bo->dev, bo->handle);
+ pthread_mutex_destroy(&bo->cpu_access_mutex);
+ free(bo);
+}
+
+int amdgpu_bo_alloc(amdgpu_device_handle dev,
+ struct amdgpu_bo_alloc_request *alloc_buffer,
+ amdgpu_bo_handle *buf_handle)
+{
+ struct amdgpu_bo *bo;
+ union drm_amdgpu_gem_create args;
+ unsigned heap = alloc_buffer->preferred_heap;
+ int r = 0;
+
+ /* It's an error if the heap is not specified */
+ if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
+ return -EINVAL;
+
+ bo = calloc(1, sizeof(struct amdgpu_bo));
+ if (!bo)
+ return -ENOMEM;
+
+ atomic_set(&bo->refcount, 1);
+ bo->dev = dev;
+ bo->alloc_size = alloc_buffer->alloc_size;
+
+ memset(&args, 0, sizeof(args));
+ args.in.bo_size = alloc_buffer->alloc_size;
+ args.in.alignment = alloc_buffer->phys_alignment;
+
+ /* Set the placement. */
+ args.in.domains = heap;
+ args.in.domain_flags = alloc_buffer->flags;
+
+ /* Allocate the buffer with the preferred heap. */
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
+ &args, sizeof(args));
+ if (r) {
+ free(bo);
+ return r;
+ }
+
+ bo->handle = args.out.handle;
+
+ pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+
+ *buf_handle = bo;
+ return 0;
+}
+
+int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
+ struct amdgpu_bo_metadata *info)
+{
+ struct drm_amdgpu_gem_metadata args = {};
+
+ args.handle = bo->handle;
+ args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
+ args.data.flags = info->flags;
+ args.data.tiling_info = info->tiling_info;
+
+ if (info->size_metadata > sizeof(args.data.data))
+ return -EINVAL;
+
+ if (info->size_metadata) {
+ args.data.data_size_bytes = info->size_metadata;
+ memcpy(args.data.data, info->umd_metadata, info->size_metadata);
+ }
+
+ return drmCommandWriteRead(bo->dev->fd,
+ DRM_AMDGPU_GEM_METADATA,
+ &args, sizeof(args));
+}
+
+int amdgpu_bo_query_info(amdgpu_bo_handle bo,
+ struct amdgpu_bo_info *info)
+{
+ struct drm_amdgpu_gem_metadata metadata = {};
+ struct drm_amdgpu_gem_create_in bo_info = {};
+ struct drm_amdgpu_gem_op gem_op = {};
+ int r;
+
+ /* Validate the BO passed in */
+ if (!bo->handle)
+ return -EINVAL;
+
+ /* Query metadata. */
+ metadata.handle = bo->handle;
+ metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;
+
+ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
+ &metadata, sizeof(metadata));
+ if (r)
+ return r;
+
+ if (metadata.data.data_size_bytes >
+ sizeof(info->metadata.umd_metadata))
+ return -EINVAL;
+
+ /* Query buffer info. */
+ gem_op.handle = bo->handle;
+ gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
+ gem_op.value = (uintptr_t)&bo_info;
+
+ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
+ &gem_op, sizeof(gem_op));
+ if (r)
+ return r;
+
+ memset(info, 0, sizeof(*info));
+ info->alloc_size = bo_info.bo_size;
+ info->phys_alignment = bo_info.alignment;
+ info->preferred_heap = bo_info.domains;
+ info->alloc_flags = bo_info.domain_flags;
+ info->metadata.flags = metadata.data.flags;
+ info->metadata.tiling_info = metadata.data.tiling_info;
+
+ info->metadata.size_metadata = metadata.data.data_size_bytes;
+ if (metadata.data.data_size_bytes > 0)
+ memcpy(info->metadata.umd_metadata, metadata.data.data,
+ metadata.data.data_size_bytes);
+
+ return 0;
+}
+
+static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
+{
+ pthread_mutex_lock(&bo->dev->bo_table_mutex);
+ util_hash_table_set(bo->dev->bo_handles,
+ (void*)(uintptr_t)bo->handle, bo);
+ pthread_mutex_unlock(&bo->dev->bo_table_mutex);
+}
+
+static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
+{
+ struct drm_gem_flink flink;
+ int fd, dma_fd;
+ uint32_t handle;
+ int r;
+
+ fd = bo->dev->fd;
+ handle = bo->handle;
+ if (bo->flink_name)
+ return 0;
+
+
+ if (bo->dev->flink_fd != bo->dev->fd) {
+ r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
+ &dma_fd);
+ if (!r) {
+ r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
+ close(dma_fd);
+ }
+ if (r)
+ return r;
+ fd = bo->dev->flink_fd;
+ }
+ memset(&flink, 0, sizeof(flink));
+ flink.handle = handle;
+
+ r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (r)
+ return r;
+
+ bo->flink_name = flink.name;
+
+ if (bo->dev->flink_fd != bo->dev->fd) {
+ struct drm_gem_close args = {};
+ args.handle = handle;
+ drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
+ }
+
+ pthread_mutex_lock(&bo->dev->bo_table_mutex);
+ util_hash_table_set(bo->dev->bo_flink_names,
+ (void*)(uintptr_t)bo->flink_name,
+ bo);
+ pthread_mutex_unlock(&bo->dev->bo_table_mutex);
+
+ return 0;
+}
+
+int amdgpu_bo_export(amdgpu_bo_handle bo,
+ enum amdgpu_bo_handle_type type,
+ uint32_t *shared_handle)
+{
+ int r;
+
+ switch (type) {
+ case amdgpu_bo_handle_type_gem_flink_name:
+ r = amdgpu_bo_export_flink(bo);
+ if (r)
+ return r;
+
+ *shared_handle = bo->flink_name;
+ return 0;
+
+ case amdgpu_bo_handle_type_kms:
+ amdgpu_add_handle_to_table(bo);
+ *shared_handle = bo->handle;
+ return 0;
+
+ case amdgpu_bo_handle_type_dma_buf_fd:
+ amdgpu_add_handle_to_table(bo);
+ return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
+ (int*)shared_handle);
+ }
+ return -EINVAL;
+}
+
+int amdgpu_bo_import(amdgpu_device_handle dev,
+ enum amdgpu_bo_handle_type type,
+ uint32_t shared_handle,
+ struct amdgpu_bo_import_result *output)
+{
+ struct drm_gem_open open_arg = {};
+ struct amdgpu_bo *bo = NULL;
+ int r;
+ int dma_fd;
+ uint64_t dma_buf_size = 0;
+
+ /* We must maintain a list of pairs <handle, bo>, so that we always
+ * return the same amdgpu_bo instance for the same handle. */
+ pthread_mutex_lock(&dev->bo_table_mutex);
+
+ /* Convert a DMA buf handle to a KMS handle now. */
+ if (type == amdgpu_bo_handle_type_dma_buf_fd) {
+ uint32_t handle;
+ off_t size;
+
+ /* Get a KMS handle. */
+		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
+		if (r) {
+			pthread_mutex_unlock(&dev->bo_table_mutex);
+			return r;
+		}
+
+ /* Query the buffer size. */
+ size = lseek(shared_handle, 0, SEEK_END);
+ if (size == (off_t)-1) {
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ amdgpu_close_kms_handle(dev, handle);
+ return -errno;
+ }
+ lseek(shared_handle, 0, SEEK_SET);
+
+ dma_buf_size = size;
+ shared_handle = handle;
+ }
+
+ /* If we have already created a buffer with this handle, find it. */
+ switch (type) {
+ case amdgpu_bo_handle_type_gem_flink_name:
+ bo = util_hash_table_get(dev->bo_flink_names,
+ (void*)(uintptr_t)shared_handle);
+ break;
+
+ case amdgpu_bo_handle_type_dma_buf_fd:
+ bo = util_hash_table_get(dev->bo_handles,
+ (void*)(uintptr_t)shared_handle);
+ break;
+
+ case amdgpu_bo_handle_type_kms:
+		/* Importing a KMS handle is not allowed. */
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ return -EPERM;
+
+ default:
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ return -EINVAL;
+ }
+
+ if (bo) {
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+
+ /* The buffer already exists, just bump the refcount. */
+ atomic_inc(&bo->refcount);
+
+ output->buf_handle = bo;
+ output->alloc_size = bo->alloc_size;
+ return 0;
+ }
+
+ bo = calloc(1, sizeof(struct amdgpu_bo));
+ if (!bo) {
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ if (type == amdgpu_bo_handle_type_dma_buf_fd) {
+ amdgpu_close_kms_handle(dev, shared_handle);
+ }
+ return -ENOMEM;
+ }
+
+ /* Open the handle. */
+ switch (type) {
+ case amdgpu_bo_handle_type_gem_flink_name:
+ open_arg.name = shared_handle;
+ r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ if (r) {
+ free(bo);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ return r;
+ }
+
+ bo->handle = open_arg.handle;
+ if (dev->flink_fd != dev->fd) {
+ r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
+ if (r) {
+ free(bo);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ return r;
+ }
+			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);
+
+ close(dma_fd);
+
+ if (r) {
+ free(bo);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ return r;
+ }
+ }
+ bo->flink_name = shared_handle;
+ bo->alloc_size = open_arg.size;
+ util_hash_table_set(dev->bo_flink_names,
+ (void*)(uintptr_t)bo->flink_name, bo);
+ break;
+
+ case amdgpu_bo_handle_type_dma_buf_fd:
+ bo->handle = shared_handle;
+ bo->alloc_size = dma_buf_size;
+ break;
+
+ case amdgpu_bo_handle_type_kms:
+ assert(0); /* unreachable */
+ }
+
+ /* Initialize it. */
+ atomic_set(&bo->refcount, 1);
+ bo->dev = dev;
+ pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+
+ util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+
+ output->buf_handle = bo;
+ output->alloc_size = bo->alloc_size;
+ return 0;
+}
+
+int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
+{
+ /* Just drop the reference. */
+ amdgpu_bo_reference(&buf_handle, NULL);
+ return 0;
+}
+
+int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
+{
+ union drm_amdgpu_gem_mmap args;
+ void *ptr;
+ int r;
+
+ pthread_mutex_lock(&bo->cpu_access_mutex);
+
+ if (bo->cpu_ptr) {
+ /* already mapped */
+ assert(bo->cpu_map_count > 0);
+ bo->cpu_map_count++;
+ *cpu = bo->cpu_ptr;
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return 0;
+ }
+
+ assert(bo->cpu_map_count == 0);
+
+ memset(&args, 0, sizeof(args));
+
+ /* Query the buffer address (args.addr_ptr).
+ * The kernel driver ignores the offset and size parameters. */
+ args.in.handle = bo->handle;
+
+ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
+ sizeof(args));
+ if (r) {
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return r;
+ }
+
+ /* Map the buffer. */
+ ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ bo->dev->fd, args.out.addr_ptr);
+ if (ptr == MAP_FAILED) {
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return -errno;
+ }
+
+ bo->cpu_ptr = ptr;
+ bo->cpu_map_count = 1;
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+
+ *cpu = ptr;
+ return 0;
+}
+
+int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
+{
+ int r;
+
+ pthread_mutex_lock(&bo->cpu_access_mutex);
+ assert(bo->cpu_map_count >= 0);
+
+ if (bo->cpu_map_count == 0) {
+ /* not mapped */
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return -EINVAL;
+ }
+
+ bo->cpu_map_count--;
+ if (bo->cpu_map_count > 0) {
+ /* mapped multiple times */
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return 0;
+ }
+
+ r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
+ bo->cpu_ptr = NULL;
+ pthread_mutex_unlock(&bo->cpu_access_mutex);
+ return r;
+}
+
+int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
+ struct amdgpu_buffer_size_alignments *info)
+{
+ info->size_local = dev->dev_info.pte_fragment_size;
+ info->size_remote = dev->dev_info.gart_page_size;
+ return 0;
+}
+
+int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
+ uint64_t timeout_ns,
+ bool *busy)
+{
+ union drm_amdgpu_gem_wait_idle args;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+ args.in.handle = bo->handle;
+ args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);
+
+ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
+ &args, sizeof(args));
+
+ if (r == 0) {
+ *busy = args.out.status;
+ return 0;
+ } else {
+ fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
+ return r;
+ }
+}
+
+int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
+ void *cpu,
+ uint64_t size,
+ amdgpu_bo_handle *buf_handle)
+{
+ int r;
+ struct amdgpu_bo *bo;
+ struct drm_amdgpu_gem_userptr args;
+
+ args.addr = (uintptr_t)cpu;
+ args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
+ AMDGPU_GEM_USERPTR_VALIDATE;
+ args.size = size;
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
+ &args, sizeof(args));
+ if (r)
+ return r;
+
+ bo = calloc(1, sizeof(struct amdgpu_bo));
+	if (!bo) {
+		/* Don't leak the userptr GEM handle on allocation failure. */
+		amdgpu_close_kms_handle(dev, args.handle);
+		return -ENOMEM;
+	}
+
+ atomic_set(&bo->refcount, 1);
+ bo->dev = dev;
+ bo->alloc_size = size;
+ bo->handle = args.handle;
+
+ *buf_handle = bo;
+
+ return r;
+}
+
+int amdgpu_bo_list_create(amdgpu_device_handle dev,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios,
+ amdgpu_bo_list_handle *result)
+{
+ struct drm_amdgpu_bo_list_entry *list;
+ union drm_amdgpu_bo_list args;
+ unsigned i;
+ int r;
+
+ if (!number_of_resources)
+ return -EINVAL;
+
+ /* overflow check for multiplication */
+ if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
+ return -EINVAL;
+
+ list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
+ if (!list)
+ return -ENOMEM;
+
+ *result = malloc(sizeof(struct amdgpu_bo_list));
+ if (!*result) {
+ free(list);
+ return -ENOMEM;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
+ args.in.bo_number = number_of_resources;
+ args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;
+
+ for (i = 0; i < number_of_resources; i++) {
+ list[i].bo_handle = resources[i]->handle;
+ if (resource_prios)
+ list[i].bo_priority = resource_prios[i];
+ else
+ list[i].bo_priority = 0;
+ }
+
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+ &args, sizeof(args));
+ free(list);
+ if (r) {
+ free(*result);
+ return r;
+ }
+
+ (*result)->dev = dev;
+ (*result)->handle = args.out.list_handle;
+ return 0;
+}
+
+int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
+{
+ union drm_amdgpu_bo_list args;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+ args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
+ args.in.list_handle = list->handle;
+
+ r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
+ &args, sizeof(args));
+
+ if (!r)
+ free(list);
+
+ return r;
+}
+
+int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios)
+{
+ struct drm_amdgpu_bo_list_entry *list;
+ union drm_amdgpu_bo_list args;
+ unsigned i;
+ int r;
+
+ if (!number_of_resources)
+ return -EINVAL;
+
+ /* overflow check for multiplication */
+ if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
+ return -EINVAL;
+
+ list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
+ if (list == NULL)
+ return -ENOMEM;
+
+ args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
+ args.in.list_handle = handle->handle;
+ args.in.bo_number = number_of_resources;
+ args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ args.in.bo_info_ptr = (uintptr_t)list;
+
+ for (i = 0; i < number_of_resources; i++) {
+ list[i].bo_handle = resources[i]->handle;
+ if (resource_prios)
+ list[i].bo_priority = resource_prios[i];
+ else
+ list[i].bo_priority = 0;
+ }
+
+ r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
+ &args, sizeof(args));
+ free(list);
+ return r;
+}
+
+int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops)
+{
+ amdgpu_device_handle dev = bo->dev;
+ struct drm_amdgpu_gem_va va;
+ int r;
+
+ if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP)
+ return -EINVAL;
+
+ memset(&va, 0, sizeof(va));
+ va.handle = bo->handle;
+ va.operation = ops;
+ va.flags = AMDGPU_VM_PAGE_READABLE |
+ AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE;
+ va.va_address = addr;
+ va.offset_in_bo = offset;
+ va.map_size = ALIGN(size, getpagesize());
+
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
+
+ return r;
+}
diff --git a/chromium/third_party/libdrm/src/amdgpu/amdgpu_cs.c b/chromium/third_party/libdrm/src/amdgpu/amdgpu_cs.c
new file mode 100644
index 00000000000..fb5b3a8c4bc
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/amdgpu_cs.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/ioctl.h>
+#ifdef HAVE_ALLOCA_H
+# include <alloca.h>
+#endif
+
+#include "xf86drm.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+
+static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem);
+static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);
+
+/**
+ * Create command submission context
+ *
+ * \param dev - \c [in] amdgpu device handle
+ * \param context - \c [out] amdgpu context handle
+ *
+ * \return 0 on success otherwise POSIX Error code
+*/
+int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
+ amdgpu_context_handle *context)
+{
+ struct amdgpu_context *gpu_context;
+ union drm_amdgpu_ctx args;
+ int i, j, k;
+ int r;
+
+ if (NULL == dev)
+ return -EINVAL;
+ if (NULL == context)
+ return -EINVAL;
+
+ gpu_context = calloc(1, sizeof(struct amdgpu_context));
+ if (NULL == gpu_context)
+ return -ENOMEM;
+
+ gpu_context->dev = dev;
+
+ r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
+ if (r)
+ goto error;
+
+ /* Create the context */
+ memset(&args, 0, sizeof(args));
+ args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
+ if (r)
+ goto error;
+
+ gpu_context->id = args.out.alloc.ctx_id;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
+ for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++)
+ for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++)
+ list_inithead(&gpu_context->sem_list[i][j][k]);
+ *context = (amdgpu_context_handle)gpu_context;
+
+ return 0;
+
+error:
+ pthread_mutex_destroy(&gpu_context->sequence_mutex);
+ free(gpu_context);
+ return r;
+}
+
+/**
+ * Release command submission context
+ *
+ * \param dev - \c [in] amdgpu device handle
+ * \param context - \c [in] amdgpu context handle
+ *
+ * \return 0 on success otherwise POSIX Error code
+*/
+int amdgpu_cs_ctx_free(amdgpu_context_handle context)
+{
+ union drm_amdgpu_ctx args;
+ int i, j, k;
+ int r;
+
+ if (NULL == context)
+ return -EINVAL;
+
+ pthread_mutex_destroy(&context->sequence_mutex);
+
+ /* now deal with kernel side */
+ memset(&args, 0, sizeof(args));
+ args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+ args.in.ctx_id = context->id;
+ r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
+ &args, sizeof(args));
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
+ for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
+ amdgpu_semaphore_handle sem;
+ LIST_FOR_EACH_ENTRY(sem, &context->sem_list[i][j][k], list) {
+ list_del(&sem->list);
+ amdgpu_cs_reset_sem(sem);
+ amdgpu_cs_unreference_sem(sem);
+ }
+ }
+ }
+ }
+ free(context);
+
+ return r;
+}
+
+int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
+ uint32_t *state, uint32_t *hangs)
+{
+ union drm_amdgpu_ctx args;
+ int r;
+
+ if (!context)
+ return -EINVAL;
+
+ memset(&args, 0, sizeof(args));
+ args.in.op = AMDGPU_CTX_OP_QUERY_STATE;
+ args.in.ctx_id = context->id;
+ r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
+ &args, sizeof(args));
+ if (!r) {
+ *state = args.out.state.reset_status;
+ *hangs = args.out.state.hangs;
+ }
+ return r;
+}
+
+/**
+ * Submit command to kernel DRM
+ *
+ * \param context - \c [in] GPU Context
+ * \param ibs_request - \c [in] Pointer to the submission request
+ *
+ * \return 0 on success otherwise POSIX Error code
+ * \sa amdgpu_cs_submit()
+*/
+static int amdgpu_cs_submit_one(amdgpu_context_handle context,
+ struct amdgpu_cs_request *ibs_request)
+{
+ union drm_amdgpu_cs cs;
+ uint64_t *chunk_array;
+ struct drm_amdgpu_cs_chunk *chunks;
+ struct drm_amdgpu_cs_chunk_data *chunk_data;
+ struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
+ struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
+ struct list_head *sem_list;
+ amdgpu_semaphore_handle sem, tmp;
+ uint32_t i, size, sem_count = 0;
+ bool user_fence;
+ int r = 0;
+
+ if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
+ return -EINVAL;
+ if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
+ return -EINVAL;
+ if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
+ return -EINVAL;
+ if (ibs_request->number_of_ibs == 0) {
+ ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
+ return 0;
+ }
+ user_fence = (ibs_request->fence_info.handle != NULL);
+
+ size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;
+
+ chunk_array = alloca(sizeof(uint64_t) * size);
+ chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
+
+ size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);
+
+ chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
+
+ memset(&cs, 0, sizeof(cs));
+ cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
+ cs.in.ctx_id = context->id;
+ if (ibs_request->resources)
+ cs.in.bo_list_handle = ibs_request->resources->handle;
+ cs.in.num_chunks = ibs_request->number_of_ibs;
+ /* IB chunks */
+ for (i = 0; i < ibs_request->number_of_ibs; i++) {
+ struct amdgpu_cs_ib_info *ib;
+ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
+
+ ib = &ibs_request->ibs[i];
+
+ chunk_data[i].ib_data._pad = 0;
+ chunk_data[i].ib_data.va_start = ib->ib_mc_address;
+ chunk_data[i].ib_data.ib_bytes = ib->size * 4;
+ chunk_data[i].ib_data.ip_type = ibs_request->ip_type;
+ chunk_data[i].ib_data.ip_instance = ibs_request->ip_instance;
+ chunk_data[i].ib_data.ring = ibs_request->ring;
+ chunk_data[i].ib_data.flags = ib->flags;
+ }
+
+ pthread_mutex_lock(&context->sequence_mutex);
+
+ if (user_fence) {
+ i = cs.in.num_chunks++;
+
+ /* fence chunk */
+ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
+
+ /* fence bo handle */
+ chunk_data[i].fence_data.handle = ibs_request->fence_info.handle->handle;
+ /* offset */
+ chunk_data[i].fence_data.offset =
+ ibs_request->fence_info.offset * sizeof(uint64_t);
+ }
+
+ if (ibs_request->number_of_dependencies) {
+ dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) *
+ ibs_request->number_of_dependencies);
+ if (!dependencies) {
+ r = -ENOMEM;
+ goto error_unlock;
+ }
+
+ for (i = 0; i < ibs_request->number_of_dependencies; ++i) {
+ struct amdgpu_cs_fence *info = &ibs_request->dependencies[i];
+ struct drm_amdgpu_cs_chunk_dep *dep = &dependencies[i];
+ dep->ip_type = info->ip_type;
+ dep->ip_instance = info->ip_instance;
+ dep->ring = info->ring;
+ dep->ctx_id = info->context->id;
+ dep->handle = info->fence;
+ }
+
+ i = cs.in.num_chunks++;
+
+ /* dependencies chunk */
+ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4
+ * ibs_request->number_of_dependencies;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies;
+ }
+
+ sem_list = &context->sem_list[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring];
+ LIST_FOR_EACH_ENTRY(sem, sem_list, list)
+ sem_count++;
+ if (sem_count) {
+ sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
+ if (!sem_dependencies) {
+ r = -ENOMEM;
+ goto error_unlock;
+ }
+ sem_count = 0;
+ LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, sem_list, list) {
+ struct amdgpu_cs_fence *info = &sem->signal_fence;
+ struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
+ dep->ip_type = info->ip_type;
+ dep->ip_instance = info->ip_instance;
+ dep->ring = info->ring;
+ dep->ctx_id = info->context->id;
+ dep->handle = info->fence;
+
+ list_del(&sem->list);
+ amdgpu_cs_reset_sem(sem);
+ amdgpu_cs_unreference_sem(sem);
+ }
+ i = cs.in.num_chunks++;
+
+ /* dependencies chunk */
+ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
+ }
+
+ r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
+ &cs, sizeof(cs));
+ if (r)
+ goto error_unlock;
+
+ ibs_request->seq_no = cs.out.handle;
+ context->last_seq[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring] = ibs_request->seq_no;
+error_unlock:
+ pthread_mutex_unlock(&context->sequence_mutex);
+ free(dependencies);
+ free(sem_dependencies);
+ return r;
+}
+
+int amdgpu_cs_submit(amdgpu_context_handle context,
+ uint64_t flags,
+ struct amdgpu_cs_request *ibs_request,
+ uint32_t number_of_requests)
+{
+ uint32_t i;
+ int r;
+
+ if (NULL == context)
+ return -EINVAL;
+ if (NULL == ibs_request)
+ return -EINVAL;
+
+ r = 0;
+ for (i = 0; i < number_of_requests; i++) {
+ r = amdgpu_cs_submit_one(context, ibs_request);
+ if (r)
+ break;
+ ibs_request++;
+ }
+
+ return r;
+}
+
+/**
+ * Calculate absolute timeout.
+ *
+ * \param timeout - \c [in] timeout in nanoseconds.
+ *
+ * \return absolute timeout in nanoseconds
+*/
+drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
+{
+ int r;
+
+ if (timeout != AMDGPU_TIMEOUT_INFINITE) {
+ struct timespec current;
+ uint64_t current_ns;
+ r = clock_gettime(CLOCK_MONOTONIC, &current);
+ if (r) {
+			fprintf(stderr, "clock_gettime() returned error (%d)!\n", errno);
+ return AMDGPU_TIMEOUT_INFINITE;
+ }
+
+ current_ns = ((uint64_t)current.tv_sec) * 1000000000ull;
+ current_ns += current.tv_nsec;
+ timeout += current_ns;
+ if (timeout < current_ns)
+ timeout = AMDGPU_TIMEOUT_INFINITE;
+ }
+ return timeout;
+}
+
+static int amdgpu_ioctl_wait_cs(amdgpu_context_handle context,
+ unsigned ip,
+ unsigned ip_instance,
+ uint32_t ring,
+ uint64_t handle,
+ uint64_t timeout_ns,
+ uint64_t flags,
+ bool *busy)
+{
+ amdgpu_device_handle dev = context->dev;
+ union drm_amdgpu_wait_cs args;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+ args.in.handle = handle;
+ args.in.ip_type = ip;
+ args.in.ip_instance = ip_instance;
+ args.in.ring = ring;
+ args.in.ctx_id = context->id;
+
+ if (flags & AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE)
+ args.in.timeout = timeout_ns;
+ else
+ args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);
+
+ r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
+ if (r)
+ return -errno;
+
+ *busy = args.out.status;
+ return 0;
+}
+
+int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
+ uint64_t timeout_ns,
+ uint64_t flags,
+ uint32_t *expired)
+{
+ bool busy = true;
+ int r;
+
+ if (NULL == fence)
+ return -EINVAL;
+ if (NULL == expired)
+ return -EINVAL;
+ if (NULL == fence->context)
+ return -EINVAL;
+ if (fence->ip_type >= AMDGPU_HW_IP_NUM)
+ return -EINVAL;
+ if (fence->ring >= AMDGPU_CS_MAX_RINGS)
+ return -EINVAL;
+ if (fence->fence == AMDGPU_NULL_SUBMIT_SEQ) {
+ *expired = true;
+ return 0;
+ }
+
+ *expired = false;
+
+ r = amdgpu_ioctl_wait_cs(fence->context, fence->ip_type,
+ fence->ip_instance, fence->ring,
+ fence->fence, timeout_ns, flags, &busy);
+
+ if (!r && !busy)
+ *expired = true;
+
+ return r;
+}
+
+int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
+{
+ struct amdgpu_semaphore *gpu_semaphore;
+
+ if (NULL == sem)
+ return -EINVAL;
+
+ gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
+ if (NULL == gpu_semaphore)
+ return -ENOMEM;
+
+ atomic_set(&gpu_semaphore->refcount, 1);
+ *sem = gpu_semaphore;
+
+ return 0;
+}
+
+int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem)
+{
+ if (NULL == ctx)
+ return -EINVAL;
+ if (ip_type >= AMDGPU_HW_IP_NUM)
+ return -EINVAL;
+ if (ring >= AMDGPU_CS_MAX_RINGS)
+ return -EINVAL;
+ if (NULL == sem)
+ return -EINVAL;
+ /* sem has been signaled */
+ if (sem->signal_fence.context)
+ return -EINVAL;
+ pthread_mutex_lock(&ctx->sequence_mutex);
+ sem->signal_fence.context = ctx;
+ sem->signal_fence.ip_type = ip_type;
+ sem->signal_fence.ip_instance = ip_instance;
+ sem->signal_fence.ring = ring;
+ sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
+ update_references(NULL, &sem->refcount);
+ pthread_mutex_unlock(&ctx->sequence_mutex);
+ return 0;
+}
+
+int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem)
+{
+ if (NULL == ctx)
+ return -EINVAL;
+ if (ip_type >= AMDGPU_HW_IP_NUM)
+ return -EINVAL;
+ if (ring >= AMDGPU_CS_MAX_RINGS)
+ return -EINVAL;
+ if (NULL == sem)
+ return -EINVAL;
+ /* must signal first */
+ if (NULL == sem->signal_fence.context)
+ return -EINVAL;
+
+ pthread_mutex_lock(&ctx->sequence_mutex);
+ list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
+ pthread_mutex_unlock(&ctx->sequence_mutex);
+ return 0;
+}
+
+static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
+{
+ if (NULL == sem)
+ return -EINVAL;
+ if (NULL == sem->signal_fence.context)
+ return -EINVAL;
+
+	sem->signal_fence.context = NULL;
+ sem->signal_fence.ip_type = 0;
+ sem->signal_fence.ip_instance = 0;
+ sem->signal_fence.ring = 0;
+ sem->signal_fence.fence = 0;
+
+ return 0;
+}
+
+static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
+{
+ if (NULL == sem)
+ return -EINVAL;
+
+ if (update_references(&sem->refcount, NULL))
+ free(sem);
+ return 0;
+}
+
+int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
+{
+ return amdgpu_cs_unreference_sem(sem);
+}
diff --git a/chromium/third_party/libdrm/src/amdgpu/amdgpu_device.c b/chromium/third_party/libdrm/src/amdgpu/amdgpu_device.c
new file mode 100644
index 00000000000..e5a923e6785
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/amdgpu_device.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * \file amdgpu_device.c
+ *
+ * Implementation of functions for AMD GPU device
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/stat.h>
+#include <errno.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "xf86drm.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "util_hash_table.h"
+#include "util_math.h"
+
+#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
+#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
+
+static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct util_hash_table *fd_tab;
+
+static unsigned handle_hash(void *key)
+{
+ return PTR_TO_UINT(key);
+}
+
+static int handle_compare(void *key1, void *key2)
+{
+ return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
+}
+
+static unsigned fd_hash(void *key)
+{
+ int fd = PTR_TO_UINT(key);
+ char *name = drmGetPrimaryDeviceNameFromFd(fd);
+ unsigned result = 0;
+ char *c;
+
+ if (name == NULL)
+ return 0;
+
+ for (c = name; *c; ++c)
+ result += *c;
+
+ free(name);
+
+ return result;
+}
+
+static int fd_compare(void *key1, void *key2)
+{
+ int fd1 = PTR_TO_UINT(key1);
+ int fd2 = PTR_TO_UINT(key2);
+ char *name1 = drmGetPrimaryDeviceNameFromFd(fd1);
+ char *name2 = drmGetPrimaryDeviceNameFromFd(fd2);
+ int result;
+
+ if (name1 == NULL || name2 == NULL) {
+ free(name1);
+ free(name2);
+ return 0;
+ }
+
+ result = strcmp(name1, name2);
+ free(name1);
+ free(name2);
+
+ return result;
+}
+
+/**
+* Get whether the fd is authenticated
+*
+* \param fd - \c [in] File descriptor for AMD GPU device
+* \param auth - \c [out] Pointer to output whether the fd is authenticated.
+*               For a render node fd, auth is set to 0.
+*               For a legacy fd, the authentication state is queried from
+*               the kernel.
+*
+* \return 0 on success\n
+*          >0 - AMD specific error code\n
+*          <0 - Negative POSIX Error code
+*/
+static int amdgpu_get_auth(int fd, int *auth)
+{
+ int r = 0;
+ drm_client_t client = {};
+
+ if (drmGetNodeTypeFromFd(fd) == DRM_NODE_RENDER)
+ *auth = 0;
+ else {
+ client.idx = 0;
+ r = drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client);
+ if (!r)
+ *auth = client.auth;
+ }
+ return r;
+}
+
+static void amdgpu_device_free_internal(amdgpu_device_handle dev)
+{
+ amdgpu_vamgr_deinit(dev->vamgr);
+ free(dev->vamgr);
+ amdgpu_vamgr_deinit(dev->vamgr_32);
+ free(dev->vamgr_32);
+ util_hash_table_destroy(dev->bo_flink_names);
+ util_hash_table_destroy(dev->bo_handles);
+ pthread_mutex_destroy(&dev->bo_table_mutex);
+ util_hash_table_remove(fd_tab, UINT_TO_PTR(dev->fd));
+ close(dev->fd);
+ if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
+ close(dev->flink_fd);
+ free(dev);
+}
+
+/**
+ * Assignment between two amdgpu_device pointers with reference counting.
+ *
+ * Usage:
+ * struct amdgpu_device *dst = ... , *src = ...;
+ *
+ * dst = src;
+ * // No reference counting. Only use this when you need to move
+ * // a reference from one pointer to another.
+ *
+ * amdgpu_device_reference(&dst, src);
+ * // Reference counters are updated. dst is decremented and src is
+ * // incremented. dst is freed if its reference counter is 0.
+ */
+static void amdgpu_device_reference(struct amdgpu_device **dst,
+ struct amdgpu_device *src)
+{
+ if (update_references(&(*dst)->refcount, &src->refcount))
+ amdgpu_device_free_internal(*dst);
+ *dst = src;
+}
+
+int amdgpu_device_initialize(int fd,
+ uint32_t *major_version,
+ uint32_t *minor_version,
+ amdgpu_device_handle *device_handle)
+{
+ struct amdgpu_device *dev;
+ drmVersionPtr version;
+ int r;
+ int flag_auth = 0;
+	int flag_authexist = 0;
+ uint32_t accel_working = 0;
+ uint64_t start, max;
+
+ *device_handle = NULL;
+
+ pthread_mutex_lock(&fd_mutex);
+ if (!fd_tab)
+ fd_tab = util_hash_table_create(fd_hash, fd_compare);
+ r = amdgpu_get_auth(fd, &flag_auth);
+ if (r) {
+ pthread_mutex_unlock(&fd_mutex);
+ return r;
+ }
+ dev = util_hash_table_get(fd_tab, UINT_TO_PTR(fd));
+ if (dev) {
+ r = amdgpu_get_auth(dev->fd, &flag_authexist);
+ if (r) {
+ pthread_mutex_unlock(&fd_mutex);
+ return r;
+ }
+ if ((flag_auth) && (!flag_authexist)) {
+ dev->flink_fd = dup(fd);
+ }
+ *major_version = dev->major_version;
+ *minor_version = dev->minor_version;
+ amdgpu_device_reference(device_handle, dev);
+ pthread_mutex_unlock(&fd_mutex);
+ return 0;
+ }
+
+ dev = calloc(1, sizeof(struct amdgpu_device));
+ if (!dev) {
+ pthread_mutex_unlock(&fd_mutex);
+ return -ENOMEM;
+ }
+
+ dev->fd = -1;
+ dev->flink_fd = -1;
+
+ atomic_set(&dev->refcount, 1);
+
+ version = drmGetVersion(fd);
+ if (version->version_major != 3) {
+ fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
+ "only compatible with 3.x.x.\n",
+ __func__,
+ version->version_major,
+ version->version_minor,
+ version->version_patchlevel);
+ drmFreeVersion(version);
+ r = -EBADF;
+ goto cleanup;
+ }
+
+ dev->fd = dup(fd);
+ dev->flink_fd = dev->fd;
+ dev->major_version = version->version_major;
+ dev->minor_version = version->version_minor;
+ drmFreeVersion(version);
+
+ dev->bo_flink_names = util_hash_table_create(handle_hash,
+ handle_compare);
+ dev->bo_handles = util_hash_table_create(handle_hash, handle_compare);
+ pthread_mutex_init(&dev->bo_table_mutex, NULL);
+
+ /* Check if acceleration is working. */
+ r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
+ if (r)
+ goto cleanup;
+ if (!accel_working) {
+ r = -EBADF;
+ goto cleanup;
+ }
+
+ r = amdgpu_query_gpu_info_init(dev);
+ if (r)
+ goto cleanup;
+
+ dev->vamgr = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
+	if (dev->vamgr == NULL) {
+		r = -ENOMEM;
+		goto cleanup;
+	}
+
+ amdgpu_vamgr_init(dev->vamgr, dev->dev_info.virtual_address_offset,
+ dev->dev_info.virtual_address_max,
+ dev->dev_info.virtual_address_alignment);
+
+ max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
+ start = amdgpu_vamgr_find_va(dev->vamgr,
+ max - dev->dev_info.virtual_address_offset,
+ dev->dev_info.virtual_address_alignment, 0);
+ if (start > 0xffffffff)
+ goto free_va; /* shouldn't get here */
+
+ dev->vamgr_32 = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
+ if (dev->vamgr_32 == NULL)
+ goto free_va;
+ amdgpu_vamgr_init(dev->vamgr_32, start, max,
+ dev->dev_info.virtual_address_alignment);
+
+ *major_version = dev->major_version;
+ *minor_version = dev->minor_version;
+ *device_handle = dev;
+ util_hash_table_set(fd_tab, UINT_TO_PTR(dev->fd), dev);
+ pthread_mutex_unlock(&fd_mutex);
+
+ return 0;
+
+free_va:
+ r = -ENOMEM;
+ amdgpu_vamgr_free_va(dev->vamgr, start,
+ max - dev->dev_info.virtual_address_offset);
+ amdgpu_vamgr_deinit(dev->vamgr);
+ free(dev->vamgr);
+
+cleanup:
+ if (dev->fd >= 0)
+ close(dev->fd);
+ free(dev);
+ pthread_mutex_unlock(&fd_mutex);
+ return r;
+}
+
+int amdgpu_device_deinitialize(amdgpu_device_handle dev)
+{
+ amdgpu_device_reference(&dev, NULL);
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/amdgpu/amdgpu_gpu_info.c b/chromium/third_party/libdrm/src/amdgpu/amdgpu_gpu_info.c
new file mode 100644
index 00000000000..0cc17f1f51d
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/amdgpu_gpu_info.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright © 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <string.h>
+
+#include "amdgpu.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "xf86drm.h"
+
+int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
+ unsigned size, void *value)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)value;
+ request.return_size = size;
+ request.query = info_id;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
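+
+/* Note: amdgpu_query_info() is the generic form of the DRM_AMDGPU_INFO
+ * ioctl; the typed helpers below issue the same ioctl with
+ * query-specific request fields filled in.
+ */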
+
+int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
+ int32_t *result)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)result;
+ request.return_size = sizeof(*result);
+ request.query = AMDGPU_INFO_CRTC_FROM_ID;
+ request.mode_crtc.id = id;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
+ unsigned count, uint32_t instance, uint32_t flags,
+ uint32_t *values)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)values;
+ request.return_size = count * sizeof(uint32_t);
+ request.query = AMDGPU_INFO_READ_MMR_REG;
+ request.read_mmr_reg.dword_offset = dword_offset;
+ request.read_mmr_reg.count = count;
+ request.read_mmr_reg.instance = instance;
+ request.read_mmr_reg.flags = flags;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
+ uint32_t *count)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)count;
+ request.return_size = sizeof(*count);
+ request.query = AMDGPU_INFO_HW_IP_COUNT;
+ request.query_hw_ip.type = type;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
+ unsigned ip_instance,
+ struct drm_amdgpu_info_hw_ip *info)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)info;
+ request.return_size = sizeof(*info);
+ request.query = AMDGPU_INFO_HW_IP_INFO;
+ request.query_hw_ip.type = type;
+ request.query_hw_ip.ip_instance = ip_instance;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
+
+int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
+ unsigned ip_instance, unsigned index,
+ uint32_t *version, uint32_t *feature)
+{
+ struct drm_amdgpu_info request;
+ struct drm_amdgpu_info_firmware firmware;
+ int r;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)&firmware;
+ request.return_size = sizeof(firmware);
+ request.query = AMDGPU_INFO_FW_VERSION;
+ request.query_fw.fw_type = fw_type;
+ request.query_fw.ip_instance = ip_instance;
+ request.query_fw.index = index;
+
+ r = drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+ if (r)
+ return r;
+
+ *version = firmware.ver;
+ *feature = firmware.feature;
+ return 0;
+}
+
+drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
+{
+ int r, i;
+
+ r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev->dev_info),
+ &dev->dev_info);
+ if (r)
+ return r;
+
+ dev->info.asic_id = dev->dev_info.device_id;
+ dev->info.chip_rev = dev->dev_info.chip_rev;
+ dev->info.chip_external_rev = dev->dev_info.external_rev;
+ dev->info.family_id = dev->dev_info.family;
+ dev->info.max_engine_clk = dev->dev_info.max_engine_clock;
+ dev->info.max_memory_clk = dev->dev_info.max_memory_clock;
+ dev->info.gpu_counter_freq = dev->dev_info.gpu_counter_freq;
+ dev->info.enabled_rb_pipes_mask = dev->dev_info.enabled_rb_pipes_mask;
+ dev->info.rb_pipes = dev->dev_info.num_rb_pipes;
+ dev->info.ids_flags = dev->dev_info.ids_flags;
+ dev->info.num_hw_gfx_contexts = dev->dev_info.num_hw_gfx_contexts;
+ dev->info.num_shader_engines = dev->dev_info.num_shader_engines;
+ dev->info.num_shader_arrays_per_engine =
+ dev->dev_info.num_shader_arrays_per_engine;
+ dev->info.vram_type = dev->dev_info.vram_type;
+ dev->info.vram_bit_width = dev->dev_info.vram_bit_width;
+ dev->info.ce_ram_size = dev->dev_info.ce_ram_size;
+ dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config;
+ dev->info.pci_rev_id = dev->dev_info.pci_rev;
+
+ for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
+ unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
+ (AMDGPU_INFO_MMR_SH_INDEX_MASK <<
+ AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
+
+ r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
+ &dev->info.backend_disable[i]);
+ if (r)
+ return r;
+ /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
+ dev->info.backend_disable[i] =
+ (dev->info.backend_disable[i] >> 16) & 0xff;
+
+ r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
+ &dev->info.pa_sc_raster_cfg[i]);
+ if (r)
+ return r;
+
+ r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
+ &dev->info.pa_sc_raster_cfg1[i]);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
+ dev->info.gb_tile_mode);
+ if (r)
+ return r;
+
+ r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
+ dev->info.gb_macro_tile_mode);
+ if (r)
+ return r;
+
+ r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
+ &dev->info.gb_addr_cfg);
+ if (r)
+ return r;
+
+ r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
+ &dev->info.mc_arb_ramcfg);
+ if (r)
+ return r;
+
+ dev->info.cu_active_number = dev->dev_info.cu_active_number;
+ dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
+ memcpy(&dev->info.cu_bitmap[0][0], &dev->dev_info.cu_bitmap[0][0], sizeof(dev->info.cu_bitmap));
+
+ /* TODO: info->max_quad_shader_pipes is not set */
+ /* TODO: info->avail_quad_shader_pipes is not set */
+ /* TODO: info->cache_entries_per_quad_pipe is not set */
+ return 0;
+}
+
+int amdgpu_query_gpu_info(amdgpu_device_handle dev,
+ struct amdgpu_gpu_info *info)
+{
+	/* Get ASIC info */
+ *info = dev->info;
+
+ return 0;
+}
+
+int amdgpu_query_heap_info(amdgpu_device_handle dev,
+ uint32_t heap,
+ uint32_t flags,
+ struct amdgpu_heap_info *info)
+{
+ struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
+ int r;
+
+ r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_GTT,
+ sizeof(vram_gtt_info), &vram_gtt_info);
+ if (r)
+ return r;
+
+ /* Get heap information */
+ switch (heap) {
+ case AMDGPU_GEM_DOMAIN_VRAM:
+		/* query only the CPU-visible VRAM heap */
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ info->heap_size = vram_gtt_info.vram_cpu_accessible_size;
+ else /* query total vram heap */
+ info->heap_size = vram_gtt_info.vram_size;
+
+ info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;
+
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ r = amdgpu_query_info(dev, AMDGPU_INFO_VIS_VRAM_USAGE,
+ sizeof(info->heap_usage),
+ &info->heap_usage);
+ else
+ r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_USAGE,
+ sizeof(info->heap_usage),
+ &info->heap_usage);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ info->heap_size = vram_gtt_info.gtt_size;
+ info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;
+
+ r = amdgpu_query_info(dev, AMDGPU_INFO_GTT_USAGE,
+ sizeof(info->heap_usage),
+ &info->heap_usage);
+ if (r)
+ return r;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
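+
+/* Usage sketch (illustrative): query the CPU-visible VRAM heap.
+ *
+ *	struct amdgpu_heap_info vram = {};
+ *	if (!amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM,
+ *				    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ *				    &vram)) {
+ *		// vram.heap_size, heap_usage and max_allocation are valid
+ *	}
+ */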
+
+int amdgpu_query_gds_info(amdgpu_device_handle dev,
+ struct amdgpu_gds_resource_info *gds_info)
+{
+ struct drm_amdgpu_info_gds gds_config = {};
+ int r;
+
+ if (gds_info == NULL)
+ return -EINVAL;
+
+ r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG,
+ sizeof(gds_config), &gds_config);
+ if (r)
+ return r;
+
+ gds_info->gds_gfx_partition_size = gds_config.gds_gfx_partition_size;
+ gds_info->compute_partition_size = gds_config.compute_partition_size;
+ gds_info->gds_total_size = gds_config.gds_total_size;
+ gds_info->gws_per_gfx_partition = gds_config.gws_per_gfx_partition;
+ gds_info->gws_per_compute_partition = gds_config.gws_per_compute_partition;
+ gds_info->oa_per_gfx_partition = gds_config.oa_per_gfx_partition;
+ gds_info->oa_per_compute_partition = gds_config.oa_per_compute_partition;
+
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/amdgpu/amdgpu_internal.h b/chromium/third_party/libdrm/src/amdgpu/amdgpu_internal.h
new file mode 100644
index 00000000000..4f039b68fa5
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/amdgpu_internal.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright © 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AMDGPU_INTERNAL_H_
+#define _AMDGPU_INTERNAL_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <assert.h>
+#include <pthread.h>
+
+#include "libdrm_macros.h"
+#include "xf86atomic.h"
+#include "amdgpu.h"
+#include "util_double_list.h"
+
+#define AMDGPU_CS_MAX_RINGS 8
+/* Do not use the macros below unless y is a power of two. */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))
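+/* e.g. ROUND_UP(13, 8) == 16 and ROUND_DOWN(13, 8) == 8; y must be a
+ * power of two. */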
+
+#define AMDGPU_INVALID_VA_ADDRESS 0xffffffffffffffff
+#define AMDGPU_NULL_SUBMIT_SEQ 0
+
+struct amdgpu_bo_va_hole {
+ struct list_head list;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct amdgpu_bo_va_mgr {
+ /* the start virtual address */
+ uint64_t va_offset;
+ uint64_t va_max;
+ struct list_head va_holes;
+ pthread_mutex_t bo_va_mutex;
+ uint32_t va_alignment;
+};
+
+struct amdgpu_va {
+ amdgpu_device_handle dev;
+ uint64_t address;
+ uint64_t size;
+ enum amdgpu_gpu_va_range range;
+ struct amdgpu_bo_va_mgr *vamgr;
+};
+
+struct amdgpu_device {
+ atomic_t refcount;
+ int fd;
+ int flink_fd;
+ unsigned major_version;
+ unsigned minor_version;
+
+ /** List of buffer handles. Protected by bo_table_mutex. */
+ struct util_hash_table *bo_handles;
+ /** List of buffer GEM flink names. Protected by bo_table_mutex. */
+ struct util_hash_table *bo_flink_names;
+ /** This protects all hash tables. */
+ pthread_mutex_t bo_table_mutex;
+ struct drm_amdgpu_info_device dev_info;
+ struct amdgpu_gpu_info info;
+ /** The global VA manager for the whole virtual address space */
+ struct amdgpu_bo_va_mgr *vamgr;
+ /** The VA manager for the 32bit address space */
+ struct amdgpu_bo_va_mgr *vamgr_32;
+};
+
+struct amdgpu_bo {
+ atomic_t refcount;
+ struct amdgpu_device *dev;
+
+ uint64_t alloc_size;
+
+ uint32_t handle;
+ uint32_t flink_name;
+
+ pthread_mutex_t cpu_access_mutex;
+ void *cpu_ptr;
+ int cpu_map_count;
+};
+
+struct amdgpu_bo_list {
+ struct amdgpu_device *dev;
+
+ uint32_t handle;
+};
+
+struct amdgpu_context {
+ struct amdgpu_device *dev;
+	/** Mutex for accessing fences and for keeping command submissions
+	    in order. */
+ pthread_mutex_t sequence_mutex;
+	/* context id */
+ uint32_t id;
+ uint64_t last_seq[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
+ struct list_head sem_list[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
+};
+
+/**
+ * Structure describing a software semaphore based on the scheduler.
+ */
+struct amdgpu_semaphore {
+ atomic_t refcount;
+ struct list_head list;
+ struct amdgpu_cs_fence signal_fence;
+};
+
+/**
+ * Functions.
+ */
+
+drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo);
+
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+ uint64_t max, uint64_t alignment);
+
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
+
+drm_private uint64_t
+amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+ uint64_t alignment, uint64_t base_required);
+
+drm_private void
+amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size);
+
+drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);
+
+drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);
+
+/**
+ * Inline functions.
+ */
+
+/**
+ * Increment src and decrement dst as if we were updating references
+ * for an assignment between 2 pointers of some objects.
+ *
+ * \return true if dst is 0
+ */
+static inline bool update_references(atomic_t *dst, atomic_t *src)
+{
+ if (dst != src) {
+ /* bump src first */
+ if (src) {
+ assert(atomic_read(src) > 0);
+ atomic_inc(src);
+ }
+ if (dst) {
+ assert(atomic_read(dst) > 0);
+ return atomic_dec_and_test(dst);
+ }
+ }
+ return false;
+}
+
+/**
+ * Assignment between two amdgpu_bo pointers with reference counting.
+ *
+ * Usage:
+ * struct amdgpu_bo *dst = ... , *src = ...;
+ *
+ * dst = src;
+ * // No reference counting. Only use this when you need to move
+ * // a reference from one pointer to another.
+ *
+ * amdgpu_bo_reference(&dst, src);
+ * // Reference counters are updated. dst is decremented and src is
+ * // incremented. dst is freed if its reference counter is 0.
+ */
+static inline void amdgpu_bo_reference(struct amdgpu_bo **dst,
+ struct amdgpu_bo *src)
+{
+ if (update_references(&(*dst)->refcount, &src->refcount))
+ amdgpu_bo_free_internal(*dst);
+ *dst = src;
+}
+
+#endif
diff --git a/chromium/third_party/libdrm/src/amdgpu/amdgpu_vamgr.c b/chromium/third_party/libdrm/src/amdgpu/amdgpu_vamgr.c
new file mode 100644
index 00000000000..8a707cbcde2
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/amdgpu_vamgr.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include "amdgpu.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "util_math.h"
+
+int amdgpu_va_range_query(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range type, uint64_t *start, uint64_t *end)
+{
+ if (type == amdgpu_gpu_va_range_general) {
+ *start = dev->dev_info.virtual_address_offset;
+ *end = dev->dev_info.virtual_address_max;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+ uint64_t max, uint64_t alignment)
+{
+ mgr->va_offset = start;
+ mgr->va_max = max;
+ mgr->va_alignment = alignment;
+
+ list_inithead(&mgr->va_holes);
+ pthread_mutex_init(&mgr->bo_va_mutex, NULL);
+}
+
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
+{
+ struct amdgpu_bo_va_hole *hole, *tmp;
+ LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
+ list_del(&hole->list);
+ free(hole);
+ }
+ pthread_mutex_destroy(&mgr->bo_va_mutex);
+}
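+
+/* The allocator below is a simple first-fit scheme: va_holes records
+ * freed ranges below the high-water mark va_offset, and
+ * amdgpu_vamgr_find_va() reuses a suitable hole before bumping
+ * va_offset. */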
+
+drm_private uint64_t
+amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+ uint64_t alignment, uint64_t base_required)
+{
+ struct amdgpu_bo_va_hole *hole, *n;
+ uint64_t offset = 0, waste = 0;
+
+ alignment = MAX2(alignment, mgr->va_alignment);
+ size = ALIGN(size, mgr->va_alignment);
+
+ if (base_required % alignment)
+ return AMDGPU_INVALID_VA_ADDRESS;
+
+ pthread_mutex_lock(&mgr->bo_va_mutex);
+	/* TODO: use a more appropriate way to track the holes */
+ /* first look for a hole */
+ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+ if (base_required) {
+ if(hole->offset > base_required ||
+ (hole->offset + hole->size) < (base_required + size))
+ continue;
+ waste = base_required - hole->offset;
+ offset = base_required;
+ } else {
+ offset = hole->offset;
+ waste = offset % alignment;
+ waste = waste ? alignment - waste : 0;
+ offset += waste;
+ if (offset >= (hole->offset + hole->size)) {
+ continue;
+ }
+ }
+ if (!waste && hole->size == size) {
+ offset = hole->offset;
+ list_del(&hole->list);
+ free(hole);
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+ }
+ if ((hole->size - waste) > size) {
+ if (waste) {
+ n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ n->size = waste;
+ n->offset = hole->offset;
+ list_add(&n->list, &hole->list);
+ }
+ hole->size -= (size + waste);
+ hole->offset += size + waste;
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+ }
+ if ((hole->size - waste) == size) {
+ hole->size = waste;
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+ }
+ }
+
+ if (base_required) {
+ if (base_required < mgr->va_offset) {
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return AMDGPU_INVALID_VA_ADDRESS;
+ }
+ offset = mgr->va_offset;
+ waste = base_required - mgr->va_offset;
+ } else {
+ offset = mgr->va_offset;
+ waste = offset % alignment;
+ waste = waste ? alignment - waste : 0;
+ }
+
+ if (offset + waste + size > mgr->va_max) {
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return AMDGPU_INVALID_VA_ADDRESS;
+ }
+
+ if (waste) {
+ n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ n->size = waste;
+ n->offset = offset;
+ list_add(&n->list, &mgr->va_holes);
+ }
+
+ offset += waste;
+ mgr->va_offset += size + waste;
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+}
+
+drm_private void
+amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
+{
+ struct amdgpu_bo_va_hole *hole;
+
+ if (va == AMDGPU_INVALID_VA_ADDRESS)
+ return;
+
+ size = ALIGN(size, mgr->va_alignment);
+
+ pthread_mutex_lock(&mgr->bo_va_mutex);
+ if ((va + size) == mgr->va_offset) {
+ mgr->va_offset = va;
+ /* Delete uppermost hole if it reaches the new top */
+ if (!LIST_IS_EMPTY(&mgr->va_holes)) {
+ hole = container_of(mgr->va_holes.next, hole, list);
+ if ((hole->offset + hole->size) == va) {
+ mgr->va_offset = hole->offset;
+ list_del(&hole->list);
+ free(hole);
+ }
+ }
+ } else {
+ struct amdgpu_bo_va_hole *next;
+
+ hole = container_of(&mgr->va_holes, hole, list);
+ LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
+ if (next->offset < va)
+ break;
+ hole = next;
+ }
+
+ if (&hole->list != &mgr->va_holes) {
+ /* Grow upper hole if it's adjacent */
+ if (hole->offset == (va + size)) {
+ hole->offset = va;
+ hole->size += size;
+ /* Merge lower hole if it's adjacent */
+ if (next != hole
+ && &next->list != &mgr->va_holes
+ && (next->offset + next->size) == va) {
+ next->size += hole->size;
+ list_del(&hole->list);
+ free(hole);
+ }
+ goto out;
+ }
+ }
+
+ /* Grow lower hole if it's adjacent */
+ if (next != hole && &next->list != &mgr->va_holes &&
+ (next->offset + next->size) == va) {
+ next->size += size;
+ goto out;
+ }
+
+		/* FIXME: on allocation failure we just lose virtual address space;
+		 * maybe print a warning
+ */
+ next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ if (next) {
+ next->size = size;
+ next->offset = va;
+ list_add(&next->list, &hole->list);
+ }
+ }
+out:
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+}
+
+int amdgpu_va_range_alloc(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range va_range_type,
+ uint64_t size,
+ uint64_t va_base_alignment,
+ uint64_t va_base_required,
+ uint64_t *va_base_allocated,
+ amdgpu_va_handle *va_range_handle,
+ uint64_t flags)
+{
+ struct amdgpu_bo_va_mgr *vamgr;
+
+ if (flags & AMDGPU_VA_RANGE_32_BIT)
+ vamgr = dev->vamgr_32;
+ else
+ vamgr = dev->vamgr;
+
+ va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
+ size = ALIGN(size, vamgr->va_alignment);
+
+ *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
+ va_base_alignment, va_base_required);
+
+ if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
+ (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
+ /* fallback to 32bit address */
+ vamgr = dev->vamgr_32;
+ *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
+ va_base_alignment, va_base_required);
+ }
+
+ if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
+ struct amdgpu_va* va;
+ va = calloc(1, sizeof(struct amdgpu_va));
+ if(!va){
+ amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
+ return -ENOMEM;
+ }
+ va->dev = dev;
+ va->address = *va_base_allocated;
+ va->size = size;
+ va->range = va_range_type;
+ va->vamgr = vamgr;
+ *va_range_handle = va;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
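+
+/* Usage sketch (illustrative): reserve 1 MiB of GPU virtual address
+ * space and release it again.
+ *
+ *	uint64_t va = 0;
+ *	amdgpu_va_handle va_handle;
+ *	if (!amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
+ *				   1 << 20, 0, 0, &va, &va_handle, 0))
+ *		amdgpu_va_range_free(va_handle);
+ */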
+
+int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
+{
+ if(!va_range_handle || !va_range_handle->address)
+ return 0;
+
+ amdgpu_vamgr_free_va(va_range_handle->vamgr,
+ va_range_handle->address,
+ va_range_handle->size);
+ free(va_range_handle);
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/amdgpu/libdrm_amdgpu.pc.in b/chromium/third_party/libdrm/src/amdgpu/libdrm_amdgpu.pc.in
new file mode 100644
index 00000000000..f1c552a623f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/libdrm_amdgpu.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_amdgpu
+Description: Userspace interface to kernel DRM services for amdgpu
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -ldrm_amdgpu
+Cflags: -I${includedir} -I${includedir}/libdrm
+Requires.private: libdrm
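+# Illustrative note: consumers typically obtain build flags via
+#   pkg-config --cflags --libs libdrm_amdgpu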
diff --git a/chromium/third_party/libdrm/src/amdgpu/util_hash.c b/chromium/third_party/libdrm/src/amdgpu/util_hash.c
new file mode 100644
index 00000000000..87cb671b6a0
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/util_hash.c
@@ -0,0 +1,387 @@
+/**************************************************************************
+ *
+ * Copyright 2007 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin <zackr@vmware.com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "util_hash.h"
+
+#include <stdlib.h>
+#include <assert.h>
+
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
+static const int MinNumBits = 4;
+
+static const unsigned char prime_deltas[] = {
+ 0, 0, 1, 3, 1, 5, 3, 3, 1, 9, 7, 5, 3, 9, 25, 3,
+ 1, 21, 3, 21, 7, 15, 9, 5, 3, 29, 15, 0, 0, 0, 0, 0
+};
+
+static int primeForNumBits(int numBits)
+{
+ return (1 << numBits) + prime_deltas[numBits];
+}
+
+/* Returns the smallest integer n such that
+ * primeForNumBits(n) >= hint.
+ */
+static int countBits(int hint)
+{
+ int numBits = 0;
+ int bits = hint;
+
+ while (bits > 1) {
+ bits >>= 1;
+ numBits++;
+ }
+
+ if (numBits >= (int)sizeof(prime_deltas)) {
+ numBits = sizeof(prime_deltas) - 1;
+ } else if (primeForNumBits(numBits) < hint) {
+ ++numBits;
+ }
+ return numBits;
+}
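+/* e.g. countBits(100) == 7, since primeForNumBits(6) == 67 < 100 while
+ * primeForNumBits(7) == 131 >= 100. */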
+
+struct util_node {
+ struct util_node *next;
+ unsigned key;
+ void *value;
+};
+
+struct util_hash_data {
+ struct util_node *fakeNext;
+ struct util_node **buckets;
+ int size;
+ int nodeSize;
+ short userNumBits;
+ short numBits;
+ int numBuckets;
+};
+
+struct util_hash {
+ union {
+ struct util_hash_data *d;
+ struct util_node *e;
+ } data;
+};
+
+static void *util_data_allocate_node(struct util_hash_data *hash)
+{
+ return malloc(hash->nodeSize);
+}
+
+static void util_free_node(struct util_node *node)
+{
+ free(node);
+}
+
+static struct util_node *
+util_hash_create_node(struct util_hash *hash,
+ unsigned akey, void *avalue,
+ struct util_node **anextNode)
+{
+ struct util_node *node = util_data_allocate_node(hash->data.d);
+
+ if (!node)
+ return NULL;
+
+ node->key = akey;
+ node->value = avalue;
+
+ node->next = (struct util_node*)(*anextNode);
+ *anextNode = node;
+ ++hash->data.d->size;
+ return node;
+}
+
+static void util_data_rehash(struct util_hash_data *hash, int hint)
+{
+ if (hint < 0) {
+ hint = countBits(-hint);
+ if (hint < MinNumBits)
+ hint = MinNumBits;
+ hash->userNumBits = (short)hint;
+ while (primeForNumBits(hint) < (hash->size >> 1))
+ ++hint;
+ } else if (hint < MinNumBits) {
+ hint = MinNumBits;
+ }
+
+ if (hash->numBits != hint) {
+ struct util_node *e = (struct util_node *)(hash);
+ struct util_node **oldBuckets = hash->buckets;
+ int oldNumBuckets = hash->numBuckets;
+ int i = 0;
+
+ hash->numBits = (short)hint;
+ hash->numBuckets = primeForNumBits(hint);
+ hash->buckets = malloc(sizeof(struct util_node*) * hash->numBuckets);
+ for (i = 0; i < hash->numBuckets; ++i)
+ hash->buckets[i] = e;
+
+ for (i = 0; i < oldNumBuckets; ++i) {
+ struct util_node *firstNode = oldBuckets[i];
+ while (firstNode != e) {
+ unsigned h = firstNode->key;
+ struct util_node *lastNode = firstNode;
+ struct util_node *afterLastNode;
+ struct util_node **beforeFirstNode;
+
+ while (lastNode->next != e && lastNode->next->key == h)
+ lastNode = lastNode->next;
+
+ afterLastNode = lastNode->next;
+ beforeFirstNode = &hash->buckets[h % hash->numBuckets];
+ while (*beforeFirstNode != e)
+ beforeFirstNode = &(*beforeFirstNode)->next;
+ lastNode->next = *beforeFirstNode;
+ *beforeFirstNode = firstNode;
+ firstNode = afterLastNode;
+ }
+ }
+ free(oldBuckets);
+ }
+}
+
+static void util_data_might_grow(struct util_hash_data *hash)
+{
+ if (hash->size >= hash->numBuckets)
+ util_data_rehash(hash, hash->numBits + 1);
+}
+
+static void util_data_has_shrunk(struct util_hash_data *hash)
+{
+ if (hash->size <= (hash->numBuckets >> 3) &&
+ hash->numBits > hash->userNumBits) {
+ int max = MAX(hash->numBits-2, hash->userNumBits);
+ util_data_rehash(hash, max);
+ }
+}
+
+static struct util_node *util_data_first_node(struct util_hash_data *hash)
+{
+ struct util_node *e = (struct util_node *)(hash);
+ struct util_node **bucket = hash->buckets;
+ int n = hash->numBuckets;
+ while (n--) {
+ if (*bucket != e)
+ return *bucket;
+ ++bucket;
+ }
+ return e;
+}
+
+static struct util_node **util_hash_find_node(struct util_hash *hash, unsigned akey)
+{
+ struct util_node **node;
+
+ if (hash->data.d->numBuckets) {
+ node = (struct util_node **)(&hash->data.d->buckets[akey % hash->data.d->numBuckets]);
+ assert(*node == hash->data.e || (*node)->next);
+ while (*node != hash->data.e && (*node)->key != akey)
+ node = &(*node)->next;
+ } else {
+ node = (struct util_node **)((const struct util_node * const *)(&hash->data.e));
+ }
+ return node;
+}
+
+drm_private struct util_hash_iter
+util_hash_insert(struct util_hash *hash, unsigned key, void *data)
+{
+ util_data_might_grow(hash->data.d);
+
+ {
+ struct util_node **nextNode = util_hash_find_node(hash, key);
+ struct util_node *node = util_hash_create_node(hash, key, data, nextNode);
+ if (!node) {
+ struct util_hash_iter null_iter = {hash, 0};
+ return null_iter;
+ }
+
+ {
+ struct util_hash_iter iter = {hash, node};
+ return iter;
+ }
+ }
+}
+
+drm_private struct util_hash *util_hash_create(void)
+{
+ struct util_hash *hash = malloc(sizeof(struct util_hash));
+ if (!hash)
+ return NULL;
+
+ hash->data.d = malloc(sizeof(struct util_hash_data));
+ if (!hash->data.d) {
+ free(hash);
+ return NULL;
+ }
+
+ hash->data.d->fakeNext = 0;
+ hash->data.d->buckets = 0;
+ hash->data.d->size = 0;
+ hash->data.d->nodeSize = sizeof(struct util_node);
+ hash->data.d->userNumBits = (short)MinNumBits;
+ hash->data.d->numBits = 0;
+ hash->data.d->numBuckets = 0;
+
+ return hash;
+}
+
+drm_private void util_hash_delete(struct util_hash *hash)
+{
+ struct util_node *e_for_x = (struct util_node *)(hash->data.d);
+ struct util_node **bucket = (struct util_node **)(hash->data.d->buckets);
+ int n = hash->data.d->numBuckets;
+ while (n--) {
+ struct util_node *cur = *bucket++;
+ while (cur != e_for_x) {
+ struct util_node *next = cur->next;
+ util_free_node(cur);
+ cur = next;
+ }
+ }
+ free(hash->data.d->buckets);
+ free(hash->data.d);
+ free(hash);
+}
+
+drm_private struct util_hash_iter
+util_hash_find(struct util_hash *hash, unsigned key)
+{
+ struct util_node **nextNode = util_hash_find_node(hash, key);
+ struct util_hash_iter iter = {hash, *nextNode};
+ return iter;
+}
+
+drm_private unsigned util_hash_iter_key(struct util_hash_iter iter)
+{
+ if (!iter.node || iter.hash->data.e == iter.node)
+ return 0;
+ return iter.node->key;
+}
+
+drm_private void *util_hash_iter_data(struct util_hash_iter iter)
+{
+ if (!iter.node || iter.hash->data.e == iter.node)
+ return 0;
+ return iter.node->value;
+}
+
+static struct util_node *util_hash_data_next(struct util_node *node)
+{
+ union {
+ struct util_node *next;
+ struct util_node *e;
+ struct util_hash_data *d;
+ } a;
+ int start;
+ struct util_node **bucket;
+ int n;
+
+ a.next = node->next;
+ if (!a.next) {
+ /* iterating beyond the last element */
+ return 0;
+ }
+ if (a.next->next)
+ return a.next;
+
+ start = (node->key % a.d->numBuckets) + 1;
+ bucket = a.d->buckets + start;
+ n = a.d->numBuckets - start;
+ while (n--) {
+ if (*bucket != a.e)
+ return *bucket;
+ ++bucket;
+ }
+ return a.e;
+}
+
+drm_private struct util_hash_iter
+util_hash_iter_next(struct util_hash_iter iter)
+{
+ struct util_hash_iter next = {iter.hash, util_hash_data_next(iter.node)};
+ return next;
+}
+
+drm_private int util_hash_iter_is_null(struct util_hash_iter iter)
+{
+ if (!iter.node || iter.node == iter.hash->data.e)
+ return 1;
+ return 0;
+}
+
+drm_private void *util_hash_take(struct util_hash *hash, unsigned akey)
+{
+ struct util_node **node = util_hash_find_node(hash, akey);
+ if (*node != hash->data.e) {
+ void *t = (*node)->value;
+ struct util_node *next = (*node)->next;
+ util_free_node(*node);
+ *node = next;
+ --hash->data.d->size;
+ util_data_has_shrunk(hash->data.d);
+ return t;
+ }
+ return 0;
+}
+
+drm_private struct util_hash_iter util_hash_first_node(struct util_hash *hash)
+{
+ struct util_hash_iter iter = {hash, util_data_first_node(hash->data.d)};
+ return iter;
+}
+
+drm_private struct util_hash_iter
+util_hash_erase(struct util_hash *hash, struct util_hash_iter iter)
+{
+ struct util_hash_iter ret = iter;
+ struct util_node *node = iter.node;
+ struct util_node **node_ptr;
+
+ if (node == hash->data.e)
+ return iter;
+
+ ret = util_hash_iter_next(ret);
+ node_ptr = (struct util_node**)(&hash->data.d->buckets[node->key % hash->data.d->numBuckets]);
+ while (*node_ptr != node)
+ node_ptr = &(*node_ptr)->next;
+ *node_ptr = node->next;
+ util_free_node(node);
+ --hash->data.d->size;
+ return ret;
+}
diff --git a/chromium/third_party/libdrm/src/amdgpu/util_hash.h b/chromium/third_party/libdrm/src/amdgpu/util_hash.h
new file mode 100644
index 00000000000..01a4779ba0d
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/util_hash.h
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright 2007 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Hash implementation.
+ *
+ * This file provides a hash implementation that is capable of dealing
+ * with collisions. It stores colliding entries in a linked list. All
+ * functions operating on the hash return an iterator. The iterator
+ * itself points to the collision list. If there wasn't any collision,
+ * the list will have just one entry; otherwise client code should
+ * iterate over the entries to find the exact one among those that
+ * had the same key (e.g. memcmp could be used on the data to check
+ * that).
+ *
+ * @author Zack Rusin <zackr@vmware.com>
+ */
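+
+/* Usage sketch (illustrative): insert a value and look it up again.
+ *
+ *	struct util_hash *h = util_hash_create();
+ *	util_hash_insert(h, key, data);
+ *	struct util_hash_iter it = util_hash_find(h, key);
+ *	if (!util_hash_iter_is_null(it))
+ *		data = util_hash_iter_data(it);
+ *	util_hash_delete(h);
+ */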
+
+#ifndef UTIL_HASH_H
+#define UTIL_HASH_H
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdbool.h>
+
+#include "libdrm_macros.h"
+
+struct util_hash;
+struct util_node;
+
+struct util_hash_iter {
+ struct util_hash *hash;
+ struct util_node *node;
+};
+
+
+drm_private struct util_hash *util_hash_create(void);
+drm_private void util_hash_delete(struct util_hash *hash);
+
+
+/**
+ * Adds data with the given key to the hash. If an entry with the given
+ * key is already in the hash, the new entry is inserted before it
+ * in the collision list.
+ * Returns an iterator pointing to the inserted item in the hash.
+ */
+drm_private struct util_hash_iter
+util_hash_insert(struct util_hash *hash, unsigned key, void *data);
+
+/**
+ * Removes the item pointed to by the current iterator from the hash.
+ * Note that the data itself is not erased; if it was a malloc'ed pointer,
+ * the caller will have to free it after calling this function.
+ * Returns an iterator pointing to the item after the removed one in
+ * the hash.
+ */
+drm_private struct util_hash_iter
+util_hash_erase(struct util_hash *hash, struct util_hash_iter iter);
+
+drm_private void *util_hash_take(struct util_hash *hash, unsigned key);
+
+
+drm_private struct util_hash_iter util_hash_first_node(struct util_hash *hash);
+
+/**
+ * Return an iterator pointing to the first entry in the collision list.
+ */
+drm_private struct util_hash_iter
+util_hash_find(struct util_hash *hash, unsigned key);
+
+
+drm_private int util_hash_iter_is_null(struct util_hash_iter iter);
+drm_private unsigned util_hash_iter_key(struct util_hash_iter iter);
+drm_private void *util_hash_iter_data(struct util_hash_iter iter);
+
+
+drm_private struct util_hash_iter
+util_hash_iter_next(struct util_hash_iter iter);
+
+#endif
diff --git a/chromium/third_party/libdrm/src/amdgpu/util_hash_table.c b/chromium/third_party/libdrm/src/amdgpu/util_hash_table.c
new file mode 100644
index 00000000000..fa7f6eabbfa
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/util_hash_table.c
@@ -0,0 +1,262 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * General purpose hash table implementation.
+ *
+ * Just uses util_hash for now, but it might be better to switch to a linear
+ * probing hash table implementation at some point, as they are said to have
+ * better lookup and cache performance, and it appears to be possible to write
+ * a lock-free implementation of such hash tables.
+ *
+ * @author José Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "util_hash_table.h"
+#include "util_hash.h"
+
+#include <stdlib.h>
+#include <assert.h>
+
+struct util_hash_table
+{
+ struct util_hash *head;
+
+ /** Hash function */
+ unsigned (*make_hash)(void *key);
+
+ /** Compare two keys */
+ int (*compare)(void *key1, void *key2);
+};
+
+struct util_hash_table_item
+{
+ void *key;
+ void *value;
+};
+
+
+static struct util_hash_table_item *
+util_hash_table_item(struct util_hash_iter iter)
+{
+ return (struct util_hash_table_item *)util_hash_iter_data(iter);
+}
+
+drm_private struct util_hash_table *
+util_hash_table_create(unsigned (*hash)(void *key),
+ int (*compare)(void *key1, void *key2))
+{
+ struct util_hash_table *ht;
+
+ ht = malloc(sizeof(struct util_hash_table));
+ if(!ht)
+ return NULL;
+
+ ht->head = util_hash_create();
+ if(!ht->head) {
+ free(ht);
+ return NULL;
+ }
+
+ ht->make_hash = hash;
+ ht->compare = compare;
+
+ return ht;
+}
+
+static struct util_hash_iter
+util_hash_table_find_iter(struct util_hash_table *ht,
+ void *key, unsigned key_hash)
+{
+ struct util_hash_iter iter;
+ struct util_hash_table_item *item;
+
+ iter = util_hash_find(ht->head, key_hash);
+ while (!util_hash_iter_is_null(iter)) {
+ item = (struct util_hash_table_item *)util_hash_iter_data(iter);
+ if (!ht->compare(item->key, key))
+ break;
+ iter = util_hash_iter_next(iter);
+ }
+
+ return iter;
+}
+
+static struct util_hash_table_item *
+util_hash_table_find_item(struct util_hash_table *ht,
+ void *key, unsigned key_hash)
+{
+ struct util_hash_iter iter;
+ struct util_hash_table_item *item;
+
+ iter = util_hash_find(ht->head, key_hash);
+ while (!util_hash_iter_is_null(iter)) {
+ item = (struct util_hash_table_item *)util_hash_iter_data(iter);
+ if (!ht->compare(item->key, key))
+ return item;
+ iter = util_hash_iter_next(iter);
+ }
+
+ return NULL;
+}
+
+drm_private void
+util_hash_table_set(struct util_hash_table *ht, void *key, void *value)
+{
+ unsigned key_hash;
+ struct util_hash_table_item *item;
+ struct util_hash_iter iter;
+
+ assert(ht);
+ if (!ht)
+ return;
+
+ key_hash = ht->make_hash(key);
+
+ item = util_hash_table_find_item(ht, key, key_hash);
+ if(item) {
+ /* TODO: key/value destruction? */
+ item->value = value;
+ return;
+ }
+
+ item = malloc(sizeof(struct util_hash_table_item));
+ if(!item)
+ return;
+
+ item->key = key;
+ item->value = value;
+
+ iter = util_hash_insert(ht->head, key_hash, item);
+ if(util_hash_iter_is_null(iter)) {
+ free(item);
+ return;
+ }
+}
+
+drm_private void *util_hash_table_get(struct util_hash_table *ht, void *key)
+{
+ unsigned key_hash;
+ struct util_hash_table_item *item;
+
+ assert(ht);
+ if (!ht)
+ return NULL;
+
+ key_hash = ht->make_hash(key);
+
+ item = util_hash_table_find_item(ht, key, key_hash);
+ if(!item)
+ return NULL;
+
+ return item->value;
+}
+
+drm_private void util_hash_table_remove(struct util_hash_table *ht, void *key)
+{
+ unsigned key_hash;
+ struct util_hash_iter iter;
+ struct util_hash_table_item *item;
+
+ assert(ht);
+ if (!ht)
+ return;
+
+ key_hash = ht->make_hash(key);
+
+ iter = util_hash_table_find_iter(ht, key, key_hash);
+ if(util_hash_iter_is_null(iter))
+ return;
+
+ item = util_hash_table_item(iter);
+ assert(item);
+ free(item);
+
+ util_hash_erase(ht->head, iter);
+}
+
+drm_private void util_hash_table_clear(struct util_hash_table *ht)
+{
+ struct util_hash_iter iter;
+ struct util_hash_table_item *item;
+
+ assert(ht);
+ if (!ht)
+ return;
+
+ iter = util_hash_first_node(ht->head);
+ while (!util_hash_iter_is_null(iter)) {
+ item = (struct util_hash_table_item *)util_hash_take(ht->head, util_hash_iter_key(iter));
+ free(item);
+ iter = util_hash_first_node(ht->head);
+ }
+}
+
+drm_private void util_hash_table_foreach(struct util_hash_table *ht,
+ void (*callback)(void *key, void *value, void *data),
+ void *data)
+{
+ struct util_hash_iter iter;
+ struct util_hash_table_item *item;
+
+ assert(ht);
+ if (!ht)
+ return;
+
+ iter = util_hash_first_node(ht->head);
+ while (!util_hash_iter_is_null(iter)) {
+ item = (struct util_hash_table_item *)util_hash_iter_data(iter);
+ callback(item->key, item->value, data);
+ iter = util_hash_iter_next(iter);
+ }
+}
+
+drm_private void util_hash_table_destroy(struct util_hash_table *ht)
+{
+ struct util_hash_iter iter;
+ struct util_hash_table_item *item;
+
+ assert(ht);
+ if (!ht)
+ return;
+
+ iter = util_hash_first_node(ht->head);
+ while (!util_hash_iter_is_null(iter)) {
+ item = (struct util_hash_table_item *)util_hash_iter_data(iter);
+ free(item);
+ iter = util_hash_iter_next(iter);
+ }
+
+ util_hash_delete(ht->head);
+ free(ht);
+}
diff --git a/chromium/third_party/libdrm/src/amdgpu/util_hash_table.h b/chromium/third_party/libdrm/src/amdgpu/util_hash_table.h
new file mode 100644
index 00000000000..e00012890e7
--- /dev/null
+++ b/chromium/third_party/libdrm/src/amdgpu/util_hash_table.h
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * General purpose hash table.
+ *
+ * @author José Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef U_HASH_TABLE_H_
+#define U_HASH_TABLE_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "libdrm_macros.h"
+
+/**
+ * General purpose hash table.
+ */
+struct util_hash_table;
+
+/**
+ * Create a hash table.
+ *
+ * @param hash hash function
+ * @param compare should return 0 for two equal keys.
+ */
+drm_private struct util_hash_table *
+util_hash_table_create(unsigned (*hash)(void *key),
+ int (*compare)(void *key1, void *key2));
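+
+/* Usage sketch (illustrative; ptr_hash/ptr_compare are hypothetical
+ * callbacks keyed on pointer identity):
+ *
+ *	static unsigned ptr_hash(void *key) { return (unsigned)(uintptr_t)key; }
+ *	static int ptr_compare(void *a, void *b) { return a != b; }
+ *
+ *	struct util_hash_table *ht = util_hash_table_create(ptr_hash, ptr_compare);
+ *	util_hash_table_set(ht, key, value);
+ *	value = util_hash_table_get(ht, key);
+ */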
+
+drm_private void
+util_hash_table_set(struct util_hash_table *ht, void *key, void *value);
+
+drm_private void *util_hash_table_get(struct util_hash_table *ht, void *key);
+
+drm_private void util_hash_table_remove(struct util_hash_table *ht, void *key);
+
+drm_private void util_hash_table_clear(struct util_hash_table *ht);
+
+drm_private void util_hash_table_foreach(struct util_hash_table *ht,
+ void (*callback)(void *key, void *value, void *data),
+ void *data);
+
+drm_private void util_hash_table_destroy(struct util_hash_table *ht);
+
+#endif /* U_HASH_TABLE_H_ */
diff --git a/chromium/third_party/libdrm/src/autogen.sh b/chromium/third_party/libdrm/src/autogen.sh
new file mode 100755
index 00000000000..c8960971d24
--- /dev/null
+++ b/chromium/third_party/libdrm/src/autogen.sh
@@ -0,0 +1,14 @@
+#! /bin/sh
+
+srcdir=`dirname "$0"`
+test -z "$srcdir" && srcdir=.
+
+ORIGDIR=`pwd`
+cd "$srcdir"
+
+autoreconf --force --verbose --install || exit 1
+cd "$ORIGDIR" || exit $?
+
+if test -z "$NOCONFIGURE"; then
+ "$srcdir"/configure "$@"
+fi
diff --git a/chromium/third_party/libdrm/src/configure.ac b/chromium/third_party/libdrm/src/configure.ac
new file mode 100644
index 00000000000..6c069f7b2ff
--- /dev/null
+++ b/chromium/third_party/libdrm/src/configure.ac
@@ -0,0 +1,584 @@
+# Copyright 2005 Adam Jackson.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# on the rights to use, copy, modify, merge, publish, distribute, sub
+# license, and/or sell copies of the Software, and to permit persons to whom
+# the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+# ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+AC_PREREQ([2.63])
+AC_INIT([libdrm],
+ [2.4.70],
+ [https://bugs.freedesktop.org/enter_bug.cgi?product=DRI],
+ [libdrm])
+
+AC_CONFIG_HEADERS([config.h])
+AC_CONFIG_SRCDIR([Makefile.am])
+AC_CONFIG_MACRO_DIR([m4])
+AC_CONFIG_AUX_DIR([build-aux])
+
+# Require xorg-macros minimum of 1.12 for XORG_WITH_XSLTPROC
+m4_ifndef([XORG_MACROS_VERSION],
+ [m4_fatal([must install xorg-macros 1.12 or later before running autoconf/autogen])])
+XORG_MACROS_VERSION(1.12)
+XORG_WITH_XSLTPROC
+XORG_MANPAGE_SECTIONS
+
+AM_INIT_AUTOMAKE([1.10 foreign dist-bzip2])
+
+# Enable quiet compiles on automake 1.11.
+m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+
+# Check for programs
+AC_PROG_CC
+AC_PROG_CC_C99
+
+if test "x$ac_cv_prog_cc_c99" = xno; then
+ AC_MSG_ERROR([Building libdrm requires C99 enabled compiler])
+fi
+
+AC_USE_SYSTEM_EXTENSIONS
+AC_SYS_LARGEFILE
+AC_FUNC_ALLOCA
+
+AC_HEADER_MAJOR
+AC_CHECK_HEADERS([sys/sysctl.h sys/select.h])
+
+# Initialize libtool
+LT_PREREQ([2.2])
+LT_INIT([disable-static])
+
+
+PKG_CHECK_MODULES(PTHREADSTUBS, pthread-stubs)
+AC_SUBST(PTHREADSTUBS_CFLAGS)
+AC_SUBST(PTHREADSTUBS_LIBS)
+
+pkgconfigdir=${libdir}/pkgconfig
+AC_SUBST(pkgconfigdir)
+AC_ARG_ENABLE([udev],
+ [AS_HELP_STRING([--enable-udev],
+ [Enable support for using udev instead of mknod (default: disabled)])],
+ [UDEV=$enableval], [UDEV=no])
+
+AC_ARG_ENABLE(libkms,
+ AS_HELP_STRING([--disable-libkms],
+ [Disable KMS mm abstraction library (default: auto, enabled on supported platforms)]),
+ [LIBKMS=$enableval], [LIBKMS=auto])
+
+AC_ARG_ENABLE(intel,
+ AS_HELP_STRING([--disable-intel],
+ [Enable support for intel's KMS API (default: auto, enabled on x86)]),
+ [INTEL=$enableval], [INTEL=auto])
+
+AC_ARG_ENABLE(radeon,
+ AS_HELP_STRING([--disable-radeon],
+ [Enable support for radeon's KMS API (default: auto)]),
+ [RADEON=$enableval], [RADEON=auto])
+
+AC_ARG_ENABLE(amdgpu,
+ AS_HELP_STRING([--disable-amdgpu],
+ [Enable support for amdgpu's KMS API (default: auto)]),
+ [AMDGPU=$enableval], [AMDGPU=auto])
+
+AC_ARG_ENABLE(nouveau,
+ AS_HELP_STRING([--disable-nouveau],
+ [Enable support for nouveau's KMS API (default: auto)]),
+ [NOUVEAU=$enableval], [NOUVEAU=auto])
+
+AC_ARG_ENABLE(vmwgfx,
+ AS_HELP_STRING([--disable-vmwgfx],
+ [Enable support for vmwgfx's KMS API (default: yes)]),
+ [VMWGFX=$enableval], [VMWGFX=yes])
+
+AC_ARG_ENABLE(omap-experimental-api,
+ AS_HELP_STRING([--enable-omap-experimental-api],
+ [Enable support for OMAP's experimental API (default: disabled)]),
+ [OMAP=$enableval], [OMAP=no])
+
+AC_ARG_ENABLE(exynos-experimental-api,
+ AS_HELP_STRING([--enable-exynos-experimental-api],
+ [Enable support for EXYNOS's experimental API (default: disabled)]),
+ [EXYNOS=$enableval], [EXYNOS=no])
+
+AC_ARG_ENABLE(freedreno,
+ AS_HELP_STRING([--disable-freedreno],
+ [Enable support for freedreno's KMS API (default: auto, enabled on arm)]),
+ [FREEDRENO=$enableval], [FREEDRENO=auto])
+
+AC_ARG_ENABLE(freedreno-kgsl,
+ AS_HELP_STRING([--enable-freedreno-kgsl],
+ [Enable support for freedreno's to use downstream android kernel API (default: disabled)]),
+ [FREEDRENO_KGSL=$enableval], [FREEDRENO_KGSL=no])
+
+AC_ARG_ENABLE(mediatek-experimental-api,
+ AS_HELP_STRING([--enable-mediatek-experimental-api],
+ [Enable support for Mediatek's experimental API (default: disabled)]),
+ [MEDIATEK=$enableval], [MEDIATEK=no])
+
+AC_ARG_ENABLE(tegra-experimental-api,
+ AS_HELP_STRING([--enable-tegra-experimental-api],
+ [Enable support for Tegra's experimental API (default: disabled)]),
+ [TEGRA=$enableval], [TEGRA=no])
+
+AC_ARG_ENABLE(vc4,
+ AS_HELP_STRING([--disable-vc4],
+ [Enable support for vc4's API (default: auto, enabled on arm)]),
+ [VC4=$enableval], [VC4=auto])
+
+AC_ARG_ENABLE(rockchip-experimental-api,
+ AS_HELP_STRING([--enable-rockchip-experimental-api],
+ [Enable support for rockchip's experimental API (default: disabled)]),
+ [ROCKCHIP=$enableval], [ROCKCHIP=no])
+
+AC_ARG_ENABLE(install-test-programs,
+ AS_HELP_STRING([--enable-install-test-programs],
+ [Install test programs (default: no)]),
+ [INSTALL_TESTS=$enableval], [INSTALL_TESTS=no])
+
+dnl ===========================================================================
+dnl check compiler flags
+AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
+ AC_MSG_CHECKING([whether $CC supports $1])
+
+ libdrm_save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $1"
+
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([ ])], [libdrm_cc_flag=yes], [libdrm_cc_flag=no])
+ CFLAGS="$libdrm_save_CFLAGS"
+
+ if test "x$libdrm_cc_flag" = "xyes"; then
+ ifelse([$2], , :, [$2])
+ else
+ ifelse([$3], , :, [$3])
+ fi
+ AC_MSG_RESULT([$libdrm_cc_flag])
+])
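+dnl Usage sketch (illustrative):
+dnl   LIBDRM_CC_TRY_FLAG([-Wall], [WARN_CFLAGS="$WARN_CFLAGS -Wall"])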
+
+dnl We use clock_gettime to check for timeouts in drmWaitVBlank
+
+AC_CHECK_FUNCS([clock_gettime], [CLOCK_LIB=],
+ [AC_CHECK_LIB([rt], [clock_gettime], [CLOCK_LIB=-lrt],
+ [AC_MSG_ERROR([Couldn't find clock_gettime])])])
+AC_SUBST([CLOCK_LIB])
+
+AC_CHECK_FUNCS([open_memstream], [HAVE_OPEN_MEMSTREAM=yes])
+
+dnl Use lots of warning flags with gcc and compatible compilers
+
+dnl Note: if you change the following variable, the cache is automatically
+dnl skipped and all flags rechecked. So there's no need to do anything
+dnl else. If for any reason you need to force a recheck, just change
+dnl MAYBE_WARN in an ignorable way (like adding whitespace)
+
+MAYBE_WARN="-Wall -Wextra \
+-Wsign-compare -Werror-implicit-function-declaration \
+-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
+-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
+-Wpacked -Wswitch-enum -Wmissing-format-attribute \
+-Wstrict-aliasing=2 -Winit-self \
+-Wdeclaration-after-statement -Wold-style-definition \
+-Wno-unused-parameter \
+-Wno-attributes -Wno-long-long -Winline -Wshadow \
+-Wno-missing-field-initializers"
+
+# invalidate cached value if MAYBE_WARN has changed
+if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then
+ unset libdrm_cv_warn_cflags
+fi
+AC_CACHE_CHECK([for supported warning flags], libdrm_cv_warn_cflags, [
+ echo
+ WARN_CFLAGS=""
+
+ # Some warning options are not supported by all versions of
+ # gcc, so test all desired options against the current
+ # compiler.
+ #
+ # Note that there are some order dependencies
+ # here. Specifically, an option that disables a warning will
+ # have no net effect if a later option then enables that
+ # warnings, (perhaps implicitly). So we put some grouped
+ # options (-Wall and -Wextra) up front and the -Wno options
+ # last.
+
+ for W in $MAYBE_WARN; do
+ LIBDRM_CC_TRY_FLAG([$W], [WARN_CFLAGS="$WARN_CFLAGS $W"])
+ done
+
+ libdrm_cv_warn_cflags=$WARN_CFLAGS
+ libdrm_cv_warn_maybe=$MAYBE_WARN
+
+ AC_MSG_CHECKING([which warning flags were supported])])
+WARN_CFLAGS="$libdrm_cv_warn_cflags"
+
+# Check for atomic intrinsics
+AC_CACHE_CHECK([for native atomic primitives], drm_cv_atomic_primitives, [
+ drm_cv_atomic_primitives="none"
+
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+ int atomic_add(int *i) { return __sync_add_and_fetch (i, 1); }
+ int atomic_cmpxchg(int *i, int j, int k) { return __sync_val_compare_and_swap (i, j, k); }
+ ]],[[]])], [drm_cv_atomic_primitives="Intel"],[])
+
+ if test "x$drm_cv_atomic_primitives" = "xnone"; then
+ AC_CHECK_HEADER([atomic_ops.h], drm_cv_atomic_primitives="libatomic-ops")
+ fi
+
+ # atomic functions defined in <atomic.h> & libc on Solaris
+ if test "x$drm_cv_atomic_primitives" = "xnone"; then
+ AC_CHECK_FUNC([atomic_cas_uint], drm_cv_atomic_primitives="Solaris")
+ fi
+])
+
+if test "x$drm_cv_atomic_primitives" = xIntel; then
+ AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 1,
+ [Enable if your compiler supports the Intel __sync_* atomic primitives])
+fi
+if test "x$drm_cv_atomic_primitives" = "xlibatomic-ops"; then
+ AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 1, [Enable if you have libatomic-ops-dev installed])
+fi
+
+dnl Print out the appropriate message considering the value set by the
+dnl respective variable in $1.
+dnl $1 - value to be evaluated. Eg. $INTEL, $NOUVEAU, ...
+dnl $2 - libdrm shortname. Eg. intel, freedreno, ...
+dnl $3 - GPU name/brand. Eg. Intel, NVIDIA Tegra, ...
+dnl $4 - Configure switch. Eg. intel, omap-experimental-api, ...
+AC_DEFUN([LIBDRM_ATOMICS_NOT_FOUND_MSG], [
+ case "x$1" in
+	xyes) AC_MSG_ERROR([libdrm_$2 depends upon atomic operations, which were not found for your compiler/cpu. Try compiling with -march=native, or install the libatomic-ops-dev package, or, failing both of those, disable support for $3 GPUs by passing --disable-$4 to ./configure]) ;;
+	xauto) AC_MSG_WARN([Disabling $2. It depends on atomic operations, which were not found for your compiler/cpu. Try compiling with -march=native, or install the libatomic-ops-dev package.]) ;;
+ *) ;;
+ esac
+])
+
+if test "x$drm_cv_atomic_primitives" = "xnone"; then
+ LIBDRM_ATOMICS_NOT_FOUND_MSG($INTEL, intel, Intel, intel)
+ INTEL=no
+
+ LIBDRM_ATOMICS_NOT_FOUND_MSG($RADEON, radeon, Radeon, radeon)
+ RADEON=no
+
+ LIBDRM_ATOMICS_NOT_FOUND_MSG($AMDGPU, amdgpu, AMD, amdgpu)
+ AMDGPU=no
+
+ LIBDRM_ATOMICS_NOT_FOUND_MSG($NOUVEAU, nouveau, NVIDIA, nouveau)
+ NOUVEAU=no
+
+ LIBDRM_ATOMICS_NOT_FOUND_MSG($OMAP, omap, OMAP, omap-experimental-api)
+ OMAP=no
+
+ LIBDRM_ATOMICS_NOT_FOUND_MSG($FREEDRENO, freedreno, Qualcomm Adreno, freedreno)
+ FREEDRENO=no
+
+ LIBDRM_ATOMICS_NOT_FOUND_MSG($TEGRA, tegra, NVIDIA Tegra, tegra-experimental-api)
+ TEGRA=no
+else
+ if test "x$INTEL" = xauto; then
+ case $host_cpu in
+ i?86|x86_64) INTEL=yes ;;
+ *) INTEL=no ;;
+ esac
+ fi
+ if test "x$RADEON" = xauto; then
+ RADEON=yes
+ fi
+ if test "x$AMDGPU" = xauto; then
+ AMDGPU=yes
+ fi
+ if test "x$NOUVEAU" = xauto; then
+ NOUVEAU=yes
+ fi
+ if test "x$FREEDRENO" = xauto; then
+ case $host_cpu in
+ arm*|aarch64) FREEDRENO=yes ;;
+ *) FREEDRENO=no ;;
+ esac
+ fi
+ if test "x$VC4" = xauto; then
+ case $host_cpu in
+ arm*|aarch64) VC4=yes ;;
+ *) VC4=no ;;
+ esac
+ fi
+fi
+
+if test "x$INTEL" != "xno"; then
+ PKG_CHECK_MODULES(PCIACCESS, [pciaccess >= 0.10])
+fi
+AC_SUBST(PCIACCESS_CFLAGS)
+AC_SUBST(PCIACCESS_LIBS)
+
+if test "x$UDEV" = xyes; then
+ AC_DEFINE(UDEV, 1, [Have UDEV support])
+fi
+
+AC_CANONICAL_HOST
+if test "x$LIBKMS" = xauto ; then
+ case $host_os in
+ linux*) LIBKMS="yes" ;;
+ freebsd* | kfreebsd*-gnu)
+ LIBKMS="yes" ;;
+ dragonfly*) LIBKMS="yes" ;;
+ *) LIBKMS="no" ;;
+ esac
+fi
+
+AM_CONDITIONAL(HAVE_LIBKMS, [test "x$LIBKMS" = xyes])
+
+AM_CONDITIONAL(HAVE_INTEL, [test "x$INTEL" = xyes])
+if test "x$INTEL" = xyes; then
+ AC_DEFINE(HAVE_INTEL, 1, [Have intel support])
+fi
+
+AM_CONDITIONAL(HAVE_VMWGFX, [test "x$VMWGFX" = xyes])
+if test "x$VMWGFX" = xyes; then
+ AC_DEFINE(HAVE_VMWGFX, 1, [Have vmwgfx kernel headers])
+fi
+
+AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" = xyes])
+if test "x$NOUVEAU" = xyes; then
+ AC_DEFINE(HAVE_NOUVEAU, 1, [Have nouveau (nvidia) support])
+fi
+
+AM_CONDITIONAL(HAVE_OMAP, [test "x$OMAP" = xyes])
+if test "x$OMAP" = xyes; then
+ AC_DEFINE(HAVE_OMAP, 1, [Have OMAP support])
+fi
+
+AM_CONDITIONAL(HAVE_EXYNOS, [test "x$EXYNOS" = xyes])
+if test "x$EXYNOS" = xyes; then
+ AC_DEFINE(HAVE_EXYNOS, 1, [Have EXYNOS support])
+fi
+
+AM_CONDITIONAL(HAVE_FREEDRENO, [test "x$FREEDRENO" = xyes])
+if test "x$FREEDRENO" = xyes; then
+ AC_DEFINE(HAVE_FREEDRENO, 1, [Have freedreno support])
+fi
+
+if test "x$FREEDRENO_KGSL" = xyes; then
+ if test "x$FREEDRENO" != xyes; then
+ AC_MSG_ERROR([Cannot enable freedreno KGSL interface if freedreno is disabled])
+ fi
+fi
+AM_CONDITIONAL(HAVE_FREEDRENO_KGSL, [test "x$FREEDRENO_KGSL" = xyes])
+if test "x$FREEDRENO_KGSL" = xyes; then
+ AC_DEFINE(HAVE_FREEDRENO_KGSL, 1, [Have freedreno support for KGSL kernel interface])
+fi
+
+AM_CONDITIONAL(HAVE_RADEON, [test "x$RADEON" = xyes])
+if test "x$RADEON" = xyes; then
+ AC_DEFINE(HAVE_RADEON, 1, [Have radeon support])
+fi
+
+if test "x$AMDGPU" != xno; then
+ # Detect cunit library
+ PKG_CHECK_MODULES([CUNIT], [cunit >= 2.1], [have_cunit=yes], [have_cunit=no])
+ # If pkg-config does not find cunit, check it using AC_CHECK_LIB. We
+ # do this because Debian (Ubuntu) lacks pkg-config file for cunit.
+ # fixed in 2.1-2.dfsg-3: http://anonscm.debian.org/cgit/collab-maint/cunit.git/commit/?h=debian
+ if test "x${have_cunit}" = "xno"; then
+ AC_CHECK_LIB([cunit], [CU_initialize_registry], [have_cunit=yes], [have_cunit=no])
+ if test "x${have_cunit}" = "xyes"; then
+ CUNIT_LIBS="-lcunit"
+ CUNIT_CFLAGS=""
+ AC_SUBST([CUNIT_LIBS])
+ AC_SUBST([CUNIT_CFLAGS])
+ fi
+ fi
+else
+ have_cunit=no
+fi
+AM_CONDITIONAL(HAVE_CUNIT, [test "x$have_cunit" != "xno"])
+
+AM_CONDITIONAL(HAVE_AMDGPU, [test "x$AMDGPU" = xyes])
+if test "x$AMDGPU" = xyes; then
+ AC_DEFINE(HAVE_AMDGPU, 1, [Have amdgpu support])
+
+	if test "x$have_cunit" != "xno"; then
+		AC_DEFINE(HAVE_CUNIT, 1, [Enable CUnit-based amdgpu tests])
+	else
+		AC_MSG_WARN([Could not find cunit library. Disabling amdgpu tests])
+	fi
+fi
+
+AM_CONDITIONAL(HAVE_TEGRA, [test "x$TEGRA" = xyes])
+if test "x$TEGRA" = xyes; then
+ AC_DEFINE(HAVE_TEGRA, 1, [Have Tegra support])
+fi
+
+AM_CONDITIONAL(HAVE_VC4, [test "x$VC4" = xyes])
+if test "x$VC4" = xyes; then
+ AC_DEFINE(HAVE_VC4, 1, [Have VC4 support])
+fi
+
+AM_CONDITIONAL(HAVE_ROCKCHIP, [test "x$ROCKCHIP" = xyes])
+if test "x$ROCKCHIP" = xyes; then
+ AC_DEFINE(HAVE_ROCKCHIP, 1, [Have ROCKCHIP support])
+fi
+
+AM_CONDITIONAL(HAVE_MEDIATEK, [test "x$MEDIATEK" = xyes])
+if test "x$MEDIATEK" = xyes; then
+ AC_DEFINE(HAVE_MEDIATEK, 1, [Have MEDIATEK support])
+fi
+
+AM_CONDITIONAL(HAVE_INSTALL_TESTS, [test "x$INSTALL_TESTS" = xyes])
+if test "x$INSTALL_TESTS" = xyes; then
+ AC_DEFINE(HAVE_INSTALL_TESTS, 1, [Install test programs])
+fi
+
+AC_ARG_ENABLE([cairo-tests],
+ [AS_HELP_STRING([--enable-cairo-tests],
+ [Enable support for Cairo rendering in tests (default: auto)])],
+ [CAIRO=$enableval], [CAIRO=auto])
+if test "x$CAIRO" != xno; then
+ PKG_CHECK_MODULES(CAIRO, cairo, [HAVE_CAIRO=yes], [HAVE_CAIRO=no])
+fi
+AC_MSG_CHECKING([whether to enable Cairo tests])
+if test "x$CAIRO" = xauto; then
+ CAIRO="$HAVE_CAIRO"
+fi
+if test "x$CAIRO" = xyes; then
+ if ! test "x$HAVE_CAIRO" = xyes; then
+ AC_MSG_ERROR([Cairo support required but not present])
+ fi
+ AC_DEFINE(HAVE_CAIRO, 1, [Have Cairo support])
+fi
+AC_MSG_RESULT([$CAIRO])
+AM_CONDITIONAL(HAVE_CAIRO, [test "x$CAIRO" = xyes])
+
+# For enumerating devices in test case
+PKG_CHECK_MODULES(LIBUDEV, libudev, [HAVE_LIBUDEV=yes], [HAVE_LIBUDEV=no])
+if test "x$HAVE_LIBUDEV" = xyes; then
+ AC_DEFINE(HAVE_LIBUDEV, 1, [Have libudev support])
+fi
+AM_CONDITIONAL(HAVE_LIBUDEV, [test "x$HAVE_LIBUDEV" = xyes])
+
+# xsltproc for docbook manpages
+AC_ARG_ENABLE([manpages],
+ AS_HELP_STRING([--enable-manpages], [enable manpages @<:@default=auto@:>@]),
+ [MANS=$enableval], [MANS=auto])
+AM_CONDITIONAL([BUILD_MANPAGES], [test "x$XSLTPROC" != "x" -a "x$MANS" != "xno"])
+
+# check for offline man-pages stylesheet
+AC_MSG_CHECKING([for docbook manpages stylesheet])
+MANPAGES_STYLESHEET="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"
+AC_PATH_PROGS_FEATURE_CHECK([XSLTPROC_TMP], [xsltproc],
+ AS_IF([`"$ac_path_XSLTPROC_TMP" --nonet "$MANPAGES_STYLESHEET" > /dev/null 2>&1`],
+ [HAVE_MANPAGES_STYLESHEET=yes]))
+if test "x$HAVE_MANPAGES_STYLESHEET" = "xyes"; then
+ AC_SUBST(MANPAGES_STYLESHEET)
+ AC_MSG_RESULT([yes])
+else
+ AC_MSG_RESULT([no])
+fi
+AM_CONDITIONAL([HAVE_MANPAGES_STYLESHEET], [test "x$HAVE_MANPAGES_STYLESHEET" = "xyes"])
+
+AC_ARG_ENABLE(valgrind,
+ [AS_HELP_STRING([--enable-valgrind],
+ [Build libdrm with valgrind support (default: auto)])],
+ [VALGRIND=$enableval], [VALGRIND=auto])
+if test "x$VALGRIND" != xno; then
+ PKG_CHECK_MODULES(VALGRIND, [valgrind], [have_valgrind=yes], [have_valgrind=no])
+fi
+AC_MSG_CHECKING([whether to enable Valgrind support])
+if test "x$VALGRIND" = xauto; then
+ VALGRIND="$have_valgrind"
+fi
+
+if test "x$VALGRIND" = "xyes"; then
+ if ! test "x$have_valgrind" = xyes; then
+ AC_MSG_ERROR([Valgrind support required but not present])
+ fi
+ AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
+fi
+
+AC_MSG_RESULT([$VALGRIND])
+
+AC_ARG_WITH([kernel-source],
+ [AS_HELP_STRING([--with-kernel-source],
+ [specify path to linux kernel source])],
+ [kernel_source="$with_kernel_source"])
+AC_SUBST(kernel_source)
+
+AC_MSG_CHECKING([whether $CC supports __attribute__((visibility("hidden")))])
+AC_LINK_IFELSE([AC_LANG_PROGRAM([
+ int foo_hidden( void ) __attribute__((visibility("hidden")));
+])], HAVE_ATTRIBUTE_VISIBILITY="yes"; AC_MSG_RESULT([yes]), AC_MSG_RESULT([no]));
+
+if test "x$HAVE_ATTRIBUTE_VISIBILITY" = xyes; then
+	AC_DEFINE(HAVE_VISIBILITY, 1, [Compiler supports __attribute__((visibility("hidden")))])
+fi
+
+AC_SUBST(WARN_CFLAGS)
+AC_CONFIG_FILES([
+ Makefile
+ libkms/Makefile
+ libkms/libkms.pc
+ intel/Makefile
+ intel/libdrm_intel.pc
+ radeon/Makefile
+ radeon/libdrm_radeon.pc
+ amdgpu/Makefile
+ amdgpu/libdrm_amdgpu.pc
+ nouveau/Makefile
+ nouveau/libdrm_nouveau.pc
+ omap/Makefile
+ omap/libdrm_omap.pc
+ exynos/Makefile
+ exynos/libdrm_exynos.pc
+ freedreno/Makefile
+ freedreno/libdrm_freedreno.pc
+ tegra/Makefile
+ tegra/libdrm_tegra.pc
+ vc4/Makefile
+ vc4/libdrm_vc4.pc
+ rockchip/Makefile
+ rockchip/libdrm_rockchip.pc
+ mediatek/Makefile
+ mediatek/libdrm_mediatek.pc
+ tests/Makefile
+ tests/modeprint/Makefile
+ tests/modetest/Makefile
+ tests/kms/Makefile
+ tests/kmstest/Makefile
+ tests/proptest/Makefile
+ tests/radeon/Makefile
+ tests/amdgpu/Makefile
+ tests/vbltest/Makefile
+ tests/exynos/Makefile
+ tests/tegra/Makefile
+ tests/nouveau/Makefile
+ tests/util/Makefile
+ man/Makefile
+ libdrm.pc])
+AC_OUTPUT
+
+echo ""
+echo "$PACKAGE_STRING will be compiled with:"
+echo ""
+echo " libkms $LIBKMS"
+echo " Intel API $INTEL"
+echo " vmwgfx API $VMWGFX"
+echo " Radeon API $RADEON"
+echo " AMDGPU API $AMDGPU"
+echo " Nouveau API $NOUVEAU"
+echo " OMAP API $OMAP"
+echo " EXYNOS API $EXYNOS"
+echo " Freedreno API $FREEDRENO (kgsl: $FREEDRENO_KGSL)"
+echo " Tegra API $TEGRA"
+echo " VC4 API $VC4"
+echo " Rockchip API $ROCKCHIP"
+echo " Mediatek API $MEDIATEK"
+echo ""
diff --git a/chromium/third_party/libdrm/src/exynos/Makefile.am b/chromium/third_party/libdrm/src/exynos/Makefile.am
new file mode 100644
index 00000000000..f99f8981ff9
--- /dev/null
+++ b/chromium/third_party/libdrm/src/exynos/Makefile.am
@@ -0,0 +1,27 @@
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ $(PTHREADSTUBS_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_exynos_la_LTLIBRARIES = libdrm_exynos.la
+libdrm_exynos_ladir = $(libdir)
+libdrm_exynos_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libdrm_exynos_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
+
+libdrm_exynos_la_SOURCES = \
+ exynos_drm.c \
+ exynos_fimg2d.c \
+ fimg2d_reg.h
+
+libdrm_exynoscommonincludedir = ${includedir}/exynos
+libdrm_exynoscommoninclude_HEADERS = exynos_drm.h exynos_fimg2d.h
+
+libdrm_exynosincludedir = ${includedir}/libdrm
+libdrm_exynosinclude_HEADERS = exynos_drmif.h
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm_exynos.pc
+
+TESTS = exynos-symbol-check
+EXTRA_DIST = $(TESTS)
diff --git a/chromium/third_party/libdrm/src/exynos/exynos-symbol-check b/chromium/third_party/libdrm/src/exynos/exynos-symbol-check
new file mode 100755
index 00000000000..9692caa685b
--- /dev/null
+++ b/chromium/third_party/libdrm/src/exynos/exynos-symbol-check
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# The following symbols (past the first five) are taken from the public headers.
+# A list of the latter should be available in Makefile.am (libdrm_exynos*_HEADERS).
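+#
+# Usage: exynos-symbol-check [path/to/libdrm_exynos.so]
+# The path defaults to .libs/libdrm_exynos.so; any exported symbol not in
+# the list below is printed and the script exits with a non-zero status.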
+
+FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_exynos.so} | awk '{print $3}'| while read func; do
+( grep -q "^$func$" || echo $func ) <<EOF
+__bss_start
+_edata
+_end
+_fini
+_init
+exynos_bo_create
+exynos_bo_destroy
+exynos_bo_from_name
+exynos_bo_get_info
+exynos_bo_get_name
+exynos_bo_handle
+exynos_bo_map
+exynos_device_create
+exynos_device_destroy
+exynos_prime_fd_to_handle
+exynos_prime_handle_to_fd
+exynos_vidi_connection
+exynos_handle_event
+g2d_blend
+g2d_copy
+g2d_copy_with_scale
+g2d_exec
+g2d_config_event
+g2d_fini
+g2d_init
+g2d_move
+g2d_scale_and_blend
+g2d_solid_fill
+EOF
+done)
+
+test ! -n "$FUNCS" || echo $FUNCS
+test ! -n "$FUNCS"
diff --git a/chromium/third_party/libdrm/src/exynos/exynos_drm.c b/chromium/third_party/libdrm/src/exynos/exynos_drm.c
new file mode 100644
index 00000000000..b961e520737
--- /dev/null
+++ b/chromium/third_party/libdrm/src/exynos/exynos_drm.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <sys/mman.h>
+#include <linux/stddef.h>
+
+#include <xf86drm.h>
+
+#include "libdrm_macros.h"
+#include "exynos_drm.h"
+#include "exynos_drmif.h"
+
+#define U642VOID(x) ((void *)(unsigned long)(x))
+
+/*
+ * Create an exynos drm device object.
+ *
+ * @fd: file descriptor of an opened exynos drm driver.
+ *
+ * Returns the device object on success, else NULL.
+ */
+struct exynos_device * exynos_device_create(int fd)
+{
+ struct exynos_device *dev;
+
+ dev = calloc(sizeof(*dev), 1);
+ if (!dev) {
+ fprintf(stderr, "failed to create device[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ dev->fd = fd;
+
+ return dev;
+}
+
+/*
+ * Destroy exynos drm device object
+ *
+ * @dev: exynos drm device object.
+ */
+void exynos_device_destroy(struct exynos_device *dev)
+{
+ free(dev);
+}
+
+/*
+ * Create an exynos buffer object for the given exynos drm device.
+ *
+ * @dev: exynos drm device object.
+ * @size: user-desired size.
+ * @flags: user-desired memory type.
+ *	One or more allocation-type and cache-attribute flags may be
+ *	combined; by default, EXYNOS_BO_NONCONTIG and
+ *	EXYNOS_BO_NONCACHABLE are used.
+ *
+ * Returns an exynos buffer object on success, else NULL.
+ */
+struct exynos_bo * exynos_bo_create(struct exynos_device *dev,
+ size_t size, uint32_t flags)
+{
+ struct exynos_bo *bo;
+ struct drm_exynos_gem_create req = {
+ .size = size,
+ .flags = flags,
+ };
+
+ if (size == 0) {
+ fprintf(stderr, "invalid size.\n");
+ goto fail;
+ }
+
+ bo = calloc(sizeof(*bo), 1);
+ if (!bo) {
+ fprintf(stderr, "failed to create bo[%s].\n",
+ strerror(errno));
+		goto fail;
+ }
+
+ bo->dev = dev;
+
+	if (drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req)) {
+ fprintf(stderr, "failed to create gem object[%s].\n",
+ strerror(errno));
+ goto err_free_bo;
+ }
+
+ bo->handle = req.handle;
+ bo->size = size;
+ bo->flags = flags;
+
+ return bo;
+
+err_free_bo:
+ free(bo);
+fail:
+ return NULL;
+}
+
+/*
+ * Get information about an allocated gem region.
+ *
+ * @dev: exynos drm device object.
+ * @handle: gem handle to request info for.
+ * @size: size of the gem object, returned by the kernel.
+ * @flags: flags of the gem object, returned by the kernel.
+ *
+ * With this call you can query the size and flags associated with
+ * a gem handle.
+ *
+ * Returns 0 on success, else a negative value.
+ */
+int exynos_bo_get_info(struct exynos_device *dev, uint32_t handle,
+ size_t *size, uint32_t *flags)
+{
+ int ret;
+ struct drm_exynos_gem_info req = {
+ .handle = handle,
+ };
+
+ ret = drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_GEM_GET, &req);
+ if (ret < 0) {
+ fprintf(stderr, "failed to get gem object information[%s].\n",
+ strerror(errno));
+ return ret;
+ }
+
+ *size = req.size;
+ *flags = req.flags;
+
+ return 0;
+}
+
+/*
+ * Destroy an exynos buffer object.
+ *
+ * @bo: the exynos buffer object to be destroyed.
+ */
+void exynos_bo_destroy(struct exynos_bo *bo)
+{
+ if (!bo)
+ return;
+
+ if (bo->vaddr)
+ munmap(bo->vaddr, bo->size);
+
+ if (bo->handle) {
+ struct drm_gem_close req = {
+ .handle = bo->handle,
+ };
+
+ drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ }
+
+ free(bo);
+}
+
+
+/*
+ * Get an exynos buffer object from a gem global object name.
+ *
+ * @dev: an exynos device object.
+ * @name: a gem global object name exported by another process.
+ *
+ * This interface is used to obtain an exynos buffer object from a gem
+ * global object name sent by another process for buffer sharing.
+ *
+ * Returns an exynos buffer object on success, else NULL.
+ *
+ */
+struct exynos_bo *
+exynos_bo_from_name(struct exynos_device *dev, uint32_t name)
+{
+ struct exynos_bo *bo;
+ struct drm_gem_open req = {
+ .name = name,
+ };
+
+ bo = calloc(sizeof(*bo), 1);
+ if (!bo) {
+ fprintf(stderr, "failed to allocate bo[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
+ fprintf(stderr, "failed to open gem object[%s].\n",
+ strerror(errno));
+ goto err_free_bo;
+ }
+
+ bo->dev = dev;
+ bo->name = name;
+ bo->handle = req.handle;
+
+ return bo;
+
+err_free_bo:
+ free(bo);
+ return NULL;
+}
+
+/*
+ * Get a gem global object name from a gem object handle.
+ *
+ * @bo: an exynos buffer object containing the gem handle.
+ * @name: the gem global object name, filled in by the kernel driver.
+ *
+ * This interface is used to request a gem global object name for a gem
+ * handle, so that the buffer can be shared with another process.
+ *
+ * Returns 0 on success, else a negative value.
+ */
+int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
+{
+ if (!bo->name) {
+ struct drm_gem_flink req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
+ if (ret) {
+ fprintf(stderr, "failed to get gem global name[%s].\n",
+ strerror(errno));
+ return ret;
+ }
+
+ bo->name = req.name;
+ }
+
+ *name = bo->name;
+
+ return 0;
+}
+
+uint32_t exynos_bo_handle(struct exynos_bo *bo)
+{
+ return bo->handle;
+}
+
+/*
+ * Mmap a buffer into user space.
+ *
+ * @bo: an exynos buffer object containing the gem object handle to be
+ *	mmapped into user space.
+ *
+ * Returns the user-space address of the mapping on success, else NULL.
+ */
+void *exynos_bo_map(struct exynos_bo *bo)
+{
+ if (!bo->vaddr) {
+ struct exynos_device *dev = bo->dev;
+ struct drm_mode_map_dumb arg;
+ void *map = NULL;
+ int ret;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->handle;
+
+ ret = drmIoctl(dev->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
+ if (ret) {
+ fprintf(stderr, "failed to map dumb buffer[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ dev->fd, arg.offset);
+
+ if (map != MAP_FAILED)
+ bo->vaddr = map;
+ }
+
+ return bo->vaddr;
+}
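+
+/*
+ * Illustrative usage sketch (not part of the library): allocate and map a
+ * buffer; 'fd' is an already-opened exynos DRM file descriptor, and error
+ * handling and headers are omitted:
+ *
+ *	struct exynos_device *dev = exynos_device_create(fd);
+ *	struct exynos_bo *bo = exynos_bo_create(dev, 4096, EXYNOS_BO_NONCONTIG);
+ *	void *vaddr = bo ? exynos_bo_map(bo) : NULL;
+ *	if (vaddr)
+ *		memset(vaddr, 0, bo->size);	// CPU access via the mapping
+ *	exynos_bo_destroy(bo);
+ *	exynos_device_destroy(dev);
+ */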
+
+/*
+ * Export gem object to dmabuf as file descriptor.
+ *
+ * @dev: exynos device object
+ * @handle: gem handle to export as file descriptor of dmabuf
+ * @fd: file descriptor returned from kernel
+ *
+ * @return: 0 on success, -1 on error, and errno will be set
+ */
+int
+exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
+{
+ return drmPrimeHandleToFD(dev->fd, handle, 0, fd);
+}
+
+/*
+ * Import file descriptor into gem handle.
+ *
+ * @dev: exynos device object
+ * @fd: file descriptor of dmabuf to import
+ * @handle: gem handle returned from kernel
+ *
+ * @return: 0 on success, -1 on error, and errno will be set
+ */
+int
+exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
+{
+ return drmPrimeFDToHandle(dev->fd, fd, handle);
+}
+
+
+
+/*
+ * Request Wireless Display connection or disconnection.
+ *
+ * @dev: an exynos device object.
+ * @connect: indicates whether to request connection or disconnection.
+ * @ext: indicates whether the edid data includes extension data.
+ * @edid: a pointer to edid data from the Wireless Display device.
+ *
+ * This interface is used to request connection to, or disconnection from,
+ * the Virtual Display driver. For this, the user should obtain the edid
+ * data from the Wireless Display device and send it to the kernel driver
+ * together with the connection request.
+ *
+ * Returns 0 on success, else a negative value.
+ */
+int
+exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
+ uint32_t ext, void *edid)
+{
+ struct drm_exynos_vidi_connection req = {
+ .connection = connect,
+ .extensions = ext,
+ .edid = (uint64_t)(uintptr_t)edid,
+ };
+ int ret;
+
+ ret = drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &req);
+ if (ret) {
+ fprintf(stderr, "failed to request vidi connection[%s].\n",
+ strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+exynos_handle_vendor(int fd, struct drm_event *e, void *ctx)
+{
+ struct drm_exynos_g2d_event *g2d;
+ struct exynos_event_context *ectx = ctx;
+
+ switch (e->type) {
+ case DRM_EXYNOS_G2D_EVENT:
+ if (ectx->version < 1 || ectx->g2d_event_handler == NULL)
+ break;
+ g2d = (struct drm_exynos_g2d_event *)e;
+ ectx->g2d_event_handler(fd, g2d->cmdlist_no, g2d->tv_sec,
+ g2d->tv_usec, U642VOID(g2d->user_data));
+ break;
+
+ default:
+ break;
+ }
+}
+
+int
+exynos_handle_event(struct exynos_device *dev, struct exynos_event_context *ctx)
+{
+ char buffer[1024];
+ int len, i;
+ struct drm_event *e;
+ struct drm_event_vblank *vblank;
+ drmEventContextPtr evctx = &ctx->base;
+
+ /* The DRM read semantics guarantees that we always get only
+ * complete events. */
+ len = read(dev->fd, buffer, sizeof buffer);
+ if (len == 0)
+ return 0;
+ if (len < (int)sizeof *e)
+ return -1;
+
+ i = 0;
+ while (i < len) {
+ e = (struct drm_event *) &buffer[i];
+ switch (e->type) {
+ case DRM_EVENT_VBLANK:
+ if (evctx->version < 1 ||
+ evctx->vblank_handler == NULL)
+ break;
+ vblank = (struct drm_event_vblank *) e;
+ evctx->vblank_handler(dev->fd,
+ vblank->sequence,
+ vblank->tv_sec,
+ vblank->tv_usec,
+ U642VOID (vblank->user_data));
+ break;
+ case DRM_EVENT_FLIP_COMPLETE:
+ if (evctx->version < 2 ||
+ evctx->page_flip_handler == NULL)
+ break;
+ vblank = (struct drm_event_vblank *) e;
+ evctx->page_flip_handler(dev->fd,
+ vblank->sequence,
+ vblank->tv_sec,
+ vblank->tv_usec,
+ U642VOID (vblank->user_data));
+ break;
+ default:
+ exynos_handle_vendor(dev->fd, e, evctx);
+ break;
+ }
+ i += e->length;
+ }
+
+ return 0;
+}
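+
+/*
+ * Illustrative event-loop sketch (not part of the library): 'dev' is an
+ * opened exynos_device and my_g2d_handler is a hypothetical callback. The
+ * caller polls the DRM fd and dispatches events through exynos_handle_event:
+ *
+ *	struct exynos_event_context ctx = {
+ *		.base = { .version = DRM_EVENT_CONTEXT_VERSION },
+ *		.version = EXYNOS_EVENT_CONTEXT_VERSION,
+ *		.g2d_event_handler = my_g2d_handler,
+ *	};
+ *	struct pollfd fds = { .fd = dev->fd, .events = POLLIN };
+ *	while (poll(&fds, 1, -1) > 0)
+ *		exynos_handle_event(dev, &ctx);
+ */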
diff --git a/chromium/third_party/libdrm/src/exynos/exynos_drm.h b/chromium/third_party/libdrm/src/exynos/exynos_drm.h
new file mode 100644
index 00000000000..c3af0ac5f6d
--- /dev/null
+++ b/chromium/third_party/libdrm/src/exynos/exynos_drm.h
@@ -0,0 +1,172 @@
+/* exynos_drm.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_H_
+#define _EXYNOS_DRM_H_
+
+#include "drm.h"
+
+/**
+ * User-desired buffer creation information structure.
+ *
+ * @size: user-desired memory allocation size.
+ * - this size value would be page-aligned internally.
+ * @flags: user request for setting memory type or cache attributes.
+ * @handle: a handle to the created gem object.
+ *	- this handle is set by the gem module on the kernel side.
+ */
+struct drm_exynos_gem_create {
+ uint64_t size;
+ unsigned int flags;
+ unsigned int handle;
+};
+
+/**
+ * A structure for gem information.
+ *
+ * @handle: a handle to the created gem object.
+ * @flags: flag value including memory type and cache attributes;
+ *	this value is set by the driver.
+ * @size: size of the memory region allocated by gem;
+ *	this size is set by the driver.
+ */
+struct drm_exynos_gem_info {
+ unsigned int handle;
+ unsigned int flags;
+ uint64_t size;
+};
+
+/**
+ * A structure for a user connection request to the virtual display.
+ *
+ * @connection: indicates whether the user requests connection or disconnection.
+ * @extensions: if this value is 1 then the vidi driver needs an additional
+ *	128 bytes of edid data.
+ * @edid: the edid data pointer from the user side.
+ */
+struct drm_exynos_vidi_connection {
+ unsigned int connection;
+ unsigned int extensions;
+ uint64_t edid;
+};
+
+/* memory type definitions. */
+enum e_drm_exynos_gem_mem_type {
+	/* physically contiguous memory, used as the default. */
+	EXYNOS_BO_CONTIG	= 0 << 0,
+	/* physically non-contiguous memory. */
+	EXYNOS_BO_NONCONTIG	= 1 << 0,
+	/* non-cacheable mapping, used as the default. */
+	EXYNOS_BO_NONCACHABLE	= 0 << 1,
+	/* cacheable mapping. */
+	EXYNOS_BO_CACHABLE	= 1 << 1,
+ /* write-combine mapping. */
+ EXYNOS_BO_WC = 1 << 2,
+ EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE |
+ EXYNOS_BO_WC
+};
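+
+/*
+ * Example (illustrative): flags combine one allocation type with cache
+ * attributes, e.g. a non-contiguous, write-combined buffer:
+ *
+ *	unsigned int flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
+ */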
+
+struct drm_exynos_g2d_get_ver {
+ __u32 major;
+ __u32 minor;
+};
+
+struct drm_exynos_g2d_cmd {
+ __u32 offset;
+ __u32 data;
+};
+
+enum drm_exynos_g2d_buf_type {
+ G2D_BUF_USERPTR = 1 << 31,
+};
+
+enum drm_exynos_g2d_event_type {
+ G2D_EVENT_NOT,
+ G2D_EVENT_NONSTOP,
+ G2D_EVENT_STOP, /* not yet */
+};
+
+struct drm_exynos_g2d_userptr {
+ unsigned long userptr;
+ unsigned long size;
+};
+
+struct drm_exynos_g2d_set_cmdlist {
+ __u64 cmd;
+ __u64 cmd_buf;
+ __u32 cmd_nr;
+ __u32 cmd_buf_nr;
+
+ /* for g2d event */
+ __u64 event_type;
+ __u64 user_data;
+};
+
+struct drm_exynos_g2d_exec {
+ __u64 async;
+};
+
+#define DRM_EXYNOS_GEM_CREATE 0x00
+/* Reserved 0x04 ~ 0x05 for exynos specific gem ioctl */
+#define DRM_EXYNOS_GEM_GET 0x04
+#define DRM_EXYNOS_VIDI_CONNECTION 0x07
+
+/* G2D */
+#define DRM_EXYNOS_G2D_GET_VER 0x20
+#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21
+#define DRM_EXYNOS_G2D_EXEC 0x22
+
+#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
+
+#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
+
+#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
+
+#define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver)
+#define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist)
+#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
+
+/* EXYNOS specific events */
+#define DRM_EXYNOS_G2D_EVENT 0x80000000
+
+struct drm_exynos_g2d_event {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+ __u32 cmdlist_no;
+ __u32 reserved;
+};
+
+#endif
diff --git a/chromium/third_party/libdrm/src/exynos/exynos_drmif.h b/chromium/third_party/libdrm/src/exynos/exynos_drmif.h
new file mode 100644
index 00000000000..626e39985a1
--- /dev/null
+++ b/chromium/third_party/libdrm/src/exynos/exynos_drmif.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ */
+
+#ifndef EXYNOS_DRMIF_H_
+#define EXYNOS_DRMIF_H_
+
+#include <xf86drm.h>
+#include <stdint.h>
+#include "exynos_drm.h"
+
+struct exynos_device {
+ int fd;
+};
+
+/*
+ * Exynos Buffer Object structure.
+ *
+ * @dev: the exynos device object the buffer was allocated from.
+ * @handle: a gem handle to the created gem object.
+ * @flags: memory allocation and cache attribute types.
+ * @size: size of the created buffer.
+ * @vaddr: user space address of the mmapped gem buffer.
+ * @name: a gem global handle from a flink request.
+ */
+struct exynos_bo {
+ struct exynos_device *dev;
+ uint32_t handle;
+ uint32_t flags;
+ size_t size;
+ void *vaddr;
+ uint32_t name;
+};
+
+#define EXYNOS_EVENT_CONTEXT_VERSION 1
+
+/*
+ * Exynos Event Context structure.
+ *
+ * @base: base context (for core events).
+ * @version: version info similar to the one in 'drmEventContext'.
+ * @g2d_event_handler: handler for G2D events.
+ */
+struct exynos_event_context {
+ drmEventContext base;
+
+ int version;
+
+ void (*g2d_event_handler)(int fd, unsigned int cmdlist_no,
+ unsigned int tv_sec, unsigned int tv_usec,
+ void *user_data);
+};
+
+/*
+ * device related functions:
+ */
+struct exynos_device * exynos_device_create(int fd);
+void exynos_device_destroy(struct exynos_device *dev);
+
+/*
+ * buffer-object related functions:
+ */
+struct exynos_bo * exynos_bo_create(struct exynos_device *dev,
+ size_t size, uint32_t flags);
+int exynos_bo_get_info(struct exynos_device *dev, uint32_t handle,
+ size_t *size, uint32_t *flags);
+void exynos_bo_destroy(struct exynos_bo *bo);
+struct exynos_bo * exynos_bo_from_name(struct exynos_device *dev, uint32_t name);
+int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name);
+uint32_t exynos_bo_handle(struct exynos_bo *bo);
+void * exynos_bo_map(struct exynos_bo *bo);
+int exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle,
+ int *fd);
+int exynos_prime_fd_to_handle(struct exynos_device *dev, int fd,
+ uint32_t *handle);
+
+/*
+ * Virtual Display related functions:
+ */
+int exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
+ uint32_t ext, void *edid);
+
+/*
+ * event handling related functions:
+ */
+int exynos_handle_event(struct exynos_device *dev,
+ struct exynos_event_context *ctx);
+
+
+#endif /* EXYNOS_DRMIF_H_ */
diff --git a/chromium/third_party/libdrm/src/exynos/exynos_fimg2d.c b/chromium/third_party/libdrm/src/exynos/exynos_fimg2d.c
new file mode 100644
index 00000000000..7f1d105a5cb
--- /dev/null
+++ b/chromium/third_party/libdrm/src/exynos/exynos_fimg2d.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#include <sys/mman.h>
+#include <linux/stddef.h>
+
+#include <xf86drm.h>
+
+#include "libdrm_macros.h"
+#include "exynos_drm.h"
+#include "fimg2d_reg.h"
+#include "exynos_fimg2d.h"
+
+#define SET_BF(val, sc, si, scsa, scda, dc, di, dcsa, dcda)	\
+	do {							\
+		val.data.src_coeff = sc;			\
+		val.data.inv_src_color_coeff = si;		\
+		val.data.src_coeff_src_a = scsa;		\
+		val.data.src_coeff_dst_a = scda;		\
+		val.data.dst_coeff = dc;			\
+		val.data.inv_dst_color_coeff = di;		\
+		val.data.dst_coeff_src_a = dcsa;		\
+		val.data.dst_coeff_dst_a = dcda;		\
+	} while (0)
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+#define MSG_PREFIX "exynos/fimg2d: "
+
+#define G2D_MAX_CMD_NR 64
+#define G2D_MAX_GEM_CMD_NR 64
+#define G2D_MAX_CMD_LIST_NR 64
+
+struct g2d_context {
+ int fd;
+ unsigned int major;
+ unsigned int minor;
+ struct drm_exynos_g2d_cmd cmd[G2D_MAX_CMD_NR];
+ struct drm_exynos_g2d_cmd cmd_buf[G2D_MAX_GEM_CMD_NR];
+ unsigned int cmd_nr;
+ unsigned int cmd_buf_nr;
+ unsigned int cmdlist_nr;
+ void *event_userdata;
+};
+
+enum g2d_base_addr_reg {
+ g2d_dst = 0,
+ g2d_src
+};
+
+enum e_g2d_dir_mode {
+ G2D_DIR_MODE_POSITIVE = 0,
+ G2D_DIR_MODE_NEGATIVE = 1
+};
+
+union g2d_direction_val {
+ unsigned int val[2];
+ struct {
+ /* SRC_MSK_DIRECT_REG [0:1] (source) */
+ enum e_g2d_dir_mode src_x_direction:1;
+ enum e_g2d_dir_mode src_y_direction:1;
+
+ /* SRC_MSK_DIRECT_REG [2:3] */
+ unsigned int reversed1:2;
+
+ /* SRC_MSK_DIRECT_REG [4:5] (mask) */
+ enum e_g2d_dir_mode mask_x_direction:1;
+ enum e_g2d_dir_mode mask_y_direction:1;
+
+ /* SRC_MSK_DIRECT_REG [6:31] */
+ unsigned int padding1:26;
+
+ /* DST_PAT_DIRECT_REG [0:1] (destination) */
+ enum e_g2d_dir_mode dst_x_direction:1;
+ enum e_g2d_dir_mode dst_y_direction:1;
+
+ /* DST_PAT_DIRECT_REG [2:3] */
+ unsigned int reversed2:2;
+
+ /* DST_PAT_DIRECT_REG [4:5] (pattern) */
+ enum e_g2d_dir_mode pat_x_direction:1;
+ enum e_g2d_dir_mode pat_y_direction:1;
+
+ /* DST_PAT_DIRECT_REG [6:31] */
+ unsigned int padding2:26;
+ } data;
+};
+
+static unsigned int g2d_get_scaling(unsigned int src, unsigned int dst)
+{
+ /*
+ * The G2D hw scaling factor is a normalized inverse of the scaling factor.
+ * For example: When source width is 100 and destination width is 200
+ * (scaling of 2x), then the hw factor is NC * 100 / 200.
+ * The normalization factor (NC) is 2^16 = 0x10000.
+ */
+
+ return ((src << 16) / dst);
+}
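+
+/*
+ * Worked example for g2d_get_scaling(): upscaling a 100 pixel wide source
+ * to a 200 pixel destination gives (100 << 16) / 200 = 0x8000, i.e. half of
+ * the normalization factor 0x10000, the inverse of the 2x scale.
+ */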
+
+static unsigned int g2d_get_blend_op(enum e_g2d_op op)
+{
+ union g2d_blend_func_val val;
+
+ val.val = 0;
+
+ /*
+ * The switch statement is missing the default branch since
+ * we assume that the caller checks the blending operation
+ * via g2d_validate_blending_op() first.
+ */
+ switch (op) {
+ case G2D_OP_CLEAR:
+ case G2D_OP_DISJOINT_CLEAR:
+ case G2D_OP_CONJOINT_CLEAR:
+ SET_BF(val, G2D_COEFF_MODE_ZERO, 0, 0, 0, G2D_COEFF_MODE_ZERO,
+ 0, 0, 0);
+ break;
+ case G2D_OP_SRC:
+ case G2D_OP_DISJOINT_SRC:
+ case G2D_OP_CONJOINT_SRC:
+ SET_BF(val, G2D_COEFF_MODE_ONE, 0, 0, 0, G2D_COEFF_MODE_ZERO,
+ 0, 0, 0);
+ break;
+ case G2D_OP_DST:
+ case G2D_OP_DISJOINT_DST:
+ case G2D_OP_CONJOINT_DST:
+ SET_BF(val, G2D_COEFF_MODE_ZERO, 0, 0, 0, G2D_COEFF_MODE_ONE,
+ 0, 0, 0);
+ break;
+ case G2D_OP_OVER:
+ SET_BF(val, G2D_COEFF_MODE_ONE, 0, 0, 0,
+ G2D_COEFF_MODE_SRC_ALPHA, 1, 0, 0);
+ break;
+ case G2D_OP_INTERPOLATE:
+ SET_BF(val, G2D_COEFF_MODE_SRC_ALPHA, 0, 0, 0,
+ G2D_COEFF_MODE_SRC_ALPHA, 1, 0, 0);
+ break;
+ }
+
+ return val.val;
+}
+
+/*
+ * g2d_check_space - check if command buffers have enough space left.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @num_cmds: number of (regular) commands.
+ * @num_gem_cmds: number of GEM commands.
+ *
+ * Returns 1 if there is not enough space left, 0 otherwise.
+ */
+static unsigned int g2d_check_space(const struct g2d_context *ctx,
+ unsigned int num_cmds, unsigned int num_gem_cmds)
+{
+ if (ctx->cmd_nr + num_cmds >= G2D_MAX_CMD_NR ||
+ ctx->cmd_buf_nr + num_gem_cmds >= G2D_MAX_GEM_CMD_NR)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * g2d_validate_select_mode - validate select mode.
+ *
+ * @mode: the mode to validate
+ *
+ * Returns zero for an invalid mode and one otherwise.
+ */
+static int g2d_validate_select_mode(
+ enum e_g2d_select_mode mode)
+{
+ switch (mode) {
+ case G2D_SELECT_MODE_NORMAL:
+ case G2D_SELECT_MODE_FGCOLOR:
+ case G2D_SELECT_MODE_BGCOLOR:
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * g2d_validate_blending_op - validate blending operation.
+ *
+ * @operation: the operation to validate
+ *
+ * Returns zero for an invalid mode and one otherwise.
+ */
+static int g2d_validate_blending_op(
+ enum e_g2d_op operation)
+{
+ switch (operation) {
+ case G2D_OP_CLEAR:
+ case G2D_OP_SRC:
+ case G2D_OP_DST:
+ case G2D_OP_OVER:
+ case G2D_OP_INTERPOLATE:
+ case G2D_OP_DISJOINT_CLEAR:
+ case G2D_OP_DISJOINT_SRC:
+ case G2D_OP_DISJOINT_DST:
+ case G2D_OP_CONJOINT_CLEAR:
+ case G2D_OP_CONJOINT_SRC:
+ case G2D_OP_CONJOINT_DST:
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * g2d_add_cmd - set given command and value to user side command buffer.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @cmd: command data.
+ * @value: value data.
+ *
+ * The caller has to make sure that the commands buffers have enough space
+ * left to hold the command. Use g2d_check_space() to ensure this.
+ */
+static void g2d_add_cmd(struct g2d_context *ctx, unsigned long cmd,
+ unsigned long value)
+{
+ switch (cmd & ~(G2D_BUF_USERPTR)) {
+ case SRC_BASE_ADDR_REG:
+ case SRC_PLANE2_BASE_ADDR_REG:
+ case DST_BASE_ADDR_REG:
+ case DST_PLANE2_BASE_ADDR_REG:
+ case PAT_BASE_ADDR_REG:
+ case MASK_BASE_ADDR_REG:
+ assert(ctx->cmd_buf_nr < G2D_MAX_GEM_CMD_NR);
+
+ ctx->cmd_buf[ctx->cmd_buf_nr].offset = cmd;
+ ctx->cmd_buf[ctx->cmd_buf_nr].data = value;
+ ctx->cmd_buf_nr++;
+ break;
+ default:
+ assert(ctx->cmd_nr < G2D_MAX_CMD_NR);
+
+ ctx->cmd[ctx->cmd_nr].offset = cmd;
+ ctx->cmd[ctx->cmd_nr].data = value;
+ ctx->cmd_nr++;
+ break;
+ }
+}
+
+/*
+ * g2d_add_base_addr - helper function to set dst/src base address register.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @img: a pointer to the dst/src g2d_image structure.
+ * @reg: the register that should be set.
+ */
+static void g2d_add_base_addr(struct g2d_context *ctx, struct g2d_image *img,
+ enum g2d_base_addr_reg reg)
+{
+ const unsigned long cmd = (reg == g2d_dst) ?
+ DST_BASE_ADDR_REG : SRC_BASE_ADDR_REG;
+
+ if (img->buf_type == G2D_IMGBUF_USERPTR)
+ g2d_add_cmd(ctx, cmd | G2D_BUF_USERPTR,
+ (unsigned long)&img->user_ptr[0]);
+ else
+ g2d_add_cmd(ctx, cmd, img->bo[0]);
+}
+
+/*
+ * g2d_set_direction - setup direction register (useful for overlapping blits).
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @dir: a pointer to the g2d_direction_val structure.
+ */
+static void g2d_set_direction(struct g2d_context *ctx,
+ const union g2d_direction_val *dir)
+{
+ g2d_add_cmd(ctx, SRC_MASK_DIRECT_REG, dir->val[0]);
+ g2d_add_cmd(ctx, DST_PAT_DIRECT_REG, dir->val[1]);
+}
+
+/*
+ * g2d_reset - reset fimg2d hardware.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ *
+ */
+static void g2d_reset(struct g2d_context *ctx)
+{
+ ctx->cmd_nr = 0;
+ ctx->cmd_buf_nr = 0;
+
+ g2d_add_cmd(ctx, SOFT_RESET_REG, 0x01);
+}
+
+/*
+ * g2d_flush - submit all commands and values in user side command buffer
+ * to command queue aware of fimg2d dma.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ *
+ * This function should be called after all commands and values to user
+ * side command buffer are set. It submits that buffer to the kernel side driver.
+ */
+static int g2d_flush(struct g2d_context *ctx)
+{
+ int ret;
+ struct drm_exynos_g2d_set_cmdlist cmdlist = {0};
+
+ if (ctx->cmd_nr == 0 && ctx->cmd_buf_nr == 0)
+ return 0;
+
+ if (ctx->cmdlist_nr >= G2D_MAX_CMD_LIST_NR) {
+ fprintf(stderr, MSG_PREFIX "command list overflow.\n");
+ return -EINVAL;
+ }
+
+ cmdlist.cmd = (uint64_t)(uintptr_t)&ctx->cmd[0];
+ cmdlist.cmd_buf = (uint64_t)(uintptr_t)&ctx->cmd_buf[0];
+ cmdlist.cmd_nr = ctx->cmd_nr;
+ cmdlist.cmd_buf_nr = ctx->cmd_buf_nr;
+
+ if (ctx->event_userdata) {
+ cmdlist.event_type = G2D_EVENT_NONSTOP;
+ cmdlist.user_data = (uint64_t)(uintptr_t)(ctx->event_userdata);
+ ctx->event_userdata = NULL;
+ } else {
+ cmdlist.event_type = G2D_EVENT_NOT;
+ cmdlist.user_data = 0;
+ }
+
+ ctx->cmd_nr = 0;
+ ctx->cmd_buf_nr = 0;
+
+ ret = drmIoctl(ctx->fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &cmdlist);
+ if (ret < 0) {
+ fprintf(stderr, MSG_PREFIX "failed to set cmdlist.\n");
+ return ret;
+ }
+
+ ctx->cmdlist_nr++;
+
+ return ret;
+}
+
+/**
+ * g2d_init - create a new g2d context and get hardware version.
+ *
+ * @fd: a file descriptor of an opened drm device.
+ */
+struct g2d_context *g2d_init(int fd)
+{
+ struct drm_exynos_g2d_get_ver ver;
+ struct g2d_context *ctx;
+ int ret;
+
+ ctx = calloc(1, sizeof(*ctx));
+ if (!ctx) {
+ fprintf(stderr, MSG_PREFIX "failed to allocate context.\n");
+ return NULL;
+ }
+
+ ctx->fd = fd;
+
+ ret = drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_GET_VER, &ver);
+ if (ret < 0) {
+ fprintf(stderr, MSG_PREFIX "failed to get version.\n");
+ free(ctx);
+ return NULL;
+ }
+
+ ctx->major = ver.major;
+ ctx->minor = ver.minor;
+
+ printf(MSG_PREFIX "G2D version (%d.%d).\n", ctx->major, ctx->minor);
+ return ctx;
+}
+
+void g2d_fini(struct g2d_context *ctx)
+{
+ free(ctx);
+}
+
+/**
+ * g2d_config_event - setup userdata configuration for a g2d event.
+ * The next invocation of a g2d call (e.g. g2d_solid_fill) is
+ * then going to flag the command buffer as 'nonstop'.
+ * Completion of the command buffer execution can then be
+ * determined by using drmHandleEvent on the DRM fd.
+ * The userdata is 'consumed' in the process.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @userdata: a pointer to the user data
+ */
+void g2d_config_event(struct g2d_context *ctx, void *userdata)
+{
+ ctx->event_userdata = userdata;
+}
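+
+/*
+ * Illustrative sketch of the event flow described above; 'my_userdata' and
+ * 'img' are hypothetical and error handling is omitted:
+ *
+ *	g2d_config_event(ctx, my_userdata);
+ *	g2d_solid_fill(ctx, &img, 0, 0, img.width, img.height);
+ *	g2d_exec(ctx);
+ *
+ * Completion is later signalled through the DRM fd and dispatched by
+ * exynos_handle_event() to the registered g2d_event_handler.
+ */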
+
+/**
+ * g2d_exec - start the dma to process all commands submitted by g2d_flush().
+ *
+ * @ctx: a pointer to g2d_context structure.
+ */
+int g2d_exec(struct g2d_context *ctx)
+{
+ struct drm_exynos_g2d_exec exec;
+ int ret;
+
+ if (ctx->cmdlist_nr == 0)
+ return -EINVAL;
+
+ exec.async = 0;
+
+ ret = drmIoctl(ctx->fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
+ if (ret < 0) {
+ fprintf(stderr, MSG_PREFIX "failed to execute.\n");
+ return ret;
+ }
+
+ ctx->cmdlist_nr = 0;
+
+ return ret;
+}
+
+/**
+ * g2d_solid_fill - fill given buffer with given color data.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @img: a pointer to g2d_image structure including image and buffer
+ * information.
+ * @x: x start position in the buffer to be filled.
+ * @y: y start position in the buffer to be filled.
+ * @w: width of the region to fill with the given color.
+ * @h: height of the region to fill with the given color.
+ */
+int
+g2d_solid_fill(struct g2d_context *ctx, struct g2d_image *img,
+ unsigned int x, unsigned int y, unsigned int w,
+ unsigned int h)
+{
+ union g2d_bitblt_cmd_val bitblt;
+ union g2d_point_val pt;
+
+ if (g2d_check_space(ctx, 7, 1))
+ return -ENOSPC;
+
+ g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_NORMAL);
+ g2d_add_cmd(ctx, DST_COLOR_MODE_REG, img->color_mode);
+ g2d_add_base_addr(ctx, img, g2d_dst);
+ g2d_add_cmd(ctx, DST_STRIDE_REG, img->stride);
+
+ if (x + w > img->width)
+ w = img->width - x;
+ if (y + h > img->height)
+ h = img->height - y;
+
+ pt.data.x = x;
+ pt.data.y = y;
+ g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
+
+ pt.data.x = x + w;
+ pt.data.y = y + h;
+ g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
+
+ g2d_add_cmd(ctx, SF_COLOR_REG, img->color);
+
+ bitblt.val = 0;
+ bitblt.data.fast_solid_color_fill_en = 1;
+ g2d_add_cmd(ctx, BITBLT_COMMAND_REG, bitblt.val);
+
+ return g2d_flush(ctx);
+}
+
+/**
+ * g2d_copy - copy contents in source buffer to destination buffer.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @src: a pointer to g2d_image structure with image and buffer
+ *	information for the source.
+ * @dst: a pointer to g2d_image structure with image and buffer
+ *	information for the destination.
+ * @src_x: x start position in the source buffer.
+ * @src_y: y start position in the source buffer.
+ * @dst_x: x start position in the destination buffer.
+ * @dst_y: y start position in the destination buffer.
+ * @w: width of the region to copy in both buffers.
+ * @h: height of the region to copy in both buffers.
+ */
+int
+g2d_copy(struct g2d_context *ctx, struct g2d_image *src,
+ struct g2d_image *dst, unsigned int src_x, unsigned int src_y,
+		unsigned int dst_x, unsigned int dst_y, unsigned int w,
+ unsigned int h)
+{
+ union g2d_rop4_val rop4;
+ union g2d_point_val pt;
+ unsigned int src_w, src_h, dst_w, dst_h;
+
+	src_w = w;
+	src_h = h;
+	if (src_x + w > src->width)
+		src_w = src->width - src_x;
+	if (src_y + h > src->height)
+		src_h = src->height - src_y;
+
+	dst_w = w;
+	dst_h = h;
+	if (dst_x + w > dst->width)
+		dst_w = dst->width - dst_x;
+	if (dst_y + h > dst->height)
+		dst_h = dst->height - dst_y;
+
+ w = MIN(src_w, dst_w);
+ h = MIN(src_h, dst_h);
+
+ if (w <= 0 || h <= 0) {
+ fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
+ return -EINVAL;
+ }
+
+ if (g2d_check_space(ctx, 11, 2))
+ return -ENOSPC;
+
+ g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
+ g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
+ g2d_add_base_addr(ctx, dst, g2d_dst);
+ g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
+
+ g2d_add_cmd(ctx, SRC_SELECT_REG, G2D_SELECT_MODE_NORMAL);
+ g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
+ g2d_add_base_addr(ctx, src, g2d_src);
+ g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
+
+ pt.data.x = src_x;
+ pt.data.y = src_y;
+ g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
+ pt.data.x = src_x + w;
+ pt.data.y = src_y + h;
+ g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
+
+ pt.data.x = dst_x;
+ pt.data.y = dst_y;
+ g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
+ pt.data.x = dst_x + w;
+ pt.data.y = dst_y + h;
+ g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
+
+ rop4.val = 0;
+ rop4.data.unmasked_rop3 = G2D_ROP3_SRC;
+ g2d_add_cmd(ctx, ROP4_REG, rop4.val);
+
+ return g2d_flush(ctx);
+}
+
+/**
+ * g2d_move - copy content inside single buffer.
+ * Similar to libc's memmove() this copies a rectangular
+ * region of the provided buffer to another location, while
+ * properly handling the situation where source and
+ * destination rectangle overlap.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @img: a pointer to g2d_image structure providing
+ * buffer information.
+ * @src_x: x position of source rectangle.
+ * @src_y: y position of source rectangle.
+ * @dst_x: x position of destination rectangle.
+ * @dst_y: y position of destination rectangle.
+ * @w: width of rectangle to move.
+ * @h: height of rectangle to move.
+ */
+int
+g2d_move(struct g2d_context *ctx, struct g2d_image *img,
+ unsigned int src_x, unsigned int src_y,
+		unsigned int dst_x, unsigned int dst_y, unsigned int w,
+ unsigned int h)
+{
+ union g2d_rop4_val rop4;
+ union g2d_point_val pt;
+ union g2d_direction_val dir;
+ unsigned int src_w, src_h, dst_w, dst_h;
+
+	src_w = w;
+	src_h = h;
+	if (src_x + w > img->width)
+		src_w = img->width - src_x;
+	if (src_y + h > img->height)
+		src_h = img->height - src_y;
+
+	dst_w = w;
+	dst_h = h;
+	if (dst_x + w > img->width)
+		dst_w = img->width - dst_x;
+	if (dst_y + h > img->height)
+		dst_h = img->height - dst_y;
+
+ w = MIN(src_w, dst_w);
+ h = MIN(src_h, dst_h);
+
+ if (w == 0 || h == 0) {
+ fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
+ return -EINVAL;
+ }
+
+ if (g2d_check_space(ctx, 13, 2))
+ return -ENOSPC;
+
+ g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
+ g2d_add_cmd(ctx, SRC_SELECT_REG, G2D_SELECT_MODE_NORMAL);
+
+ g2d_add_cmd(ctx, DST_COLOR_MODE_REG, img->color_mode);
+ g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, img->color_mode);
+
+ g2d_add_base_addr(ctx, img, g2d_dst);
+ g2d_add_base_addr(ctx, img, g2d_src);
+
+ g2d_add_cmd(ctx, DST_STRIDE_REG, img->stride);
+ g2d_add_cmd(ctx, SRC_STRIDE_REG, img->stride);
+
+ dir.val[0] = dir.val[1] = 0;
+
+ if (dst_x >= src_x)
+ dir.data.src_x_direction = dir.data.dst_x_direction = 1;
+ if (dst_y >= src_y)
+ dir.data.src_y_direction = dir.data.dst_y_direction = 1;
+
+ g2d_set_direction(ctx, &dir);
+
+ pt.data.x = src_x;
+ pt.data.y = src_y;
+ g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
+ pt.data.x = src_x + w;
+ pt.data.y = src_y + h;
+ g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
+
+ pt.data.x = dst_x;
+ pt.data.y = dst_y;
+ g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
+ pt.data.x = dst_x + w;
+ pt.data.y = dst_y + h;
+ g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
+
+ rop4.val = 0;
+ rop4.data.unmasked_rop3 = G2D_ROP3_SRC;
+ g2d_add_cmd(ctx, ROP4_REG, rop4.val);
+
+ return g2d_flush(ctx);
+}
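+
+/*
+ * Example (illustrative): scroll the whole buffer 'img' up by 16 lines;
+ * source and destination rectangles overlap, which g2d_move() resolves via
+ * the direction registers:
+ *
+ *	g2d_move(ctx, &img, 0, 16, 0, 0, img.width, img.height - 16);
+ */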
+
+/**
+ * g2d_copy_with_scale - copy contents of the source buffer to the destination
+ *	buffer, scaling up or down as needed.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @src: a pointer to g2d_image structure with image and buffer
+ *	information for the source.
+ * @dst: a pointer to g2d_image structure with image and buffer
+ *	information for the destination.
+ * @src_x: x start position in the source buffer.
+ * @src_y: y start position in the source buffer.
+ * @src_w: width of the source region.
+ * @src_h: height of the source region.
+ * @dst_x: x start position in the destination buffer.
+ * @dst_y: y start position in the destination buffer.
+ * @dst_w: width of the destination region.
+ * @dst_h: height of the destination region.
+ * @negative: if set, apply a color negative (inversion) while copying.
+ */
+int
+g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
+ struct g2d_image *dst, unsigned int src_x,
+ unsigned int src_y, unsigned int src_w,
+ unsigned int src_h, unsigned int dst_x,
+ unsigned int dst_y, unsigned int dst_w,
+ unsigned int dst_h, unsigned int negative)
+{
+ union g2d_rop4_val rop4;
+ union g2d_point_val pt;
+ unsigned int scale, repeat_pad;
+ unsigned int scale_x, scale_y;
+
+ /* Sanitize this parameter to facilitate space computation below. */
+ if (negative)
+ negative = 1;
+
+ if (src_w == dst_w && src_h == dst_h)
+ scale = 0;
+ else {
+ scale = 1;
+ scale_x = g2d_get_scaling(src_w, dst_w);
+ scale_y = g2d_get_scaling(src_h, dst_h);
+ }
+
+ repeat_pad = src->repeat_mode == G2D_REPEAT_MODE_PAD ? 1 : 0;
+
+ if (src_x + src_w > src->width)
+ src_w = src->width - src_x;
+ if (src_y + src_h > src->height)
+ src_h = src->height - src_y;
+
+ if (dst_x + dst_w > dst->width)
+ dst_w = dst->width - dst_x;
+ if (dst_y + dst_h > dst->height)
+ dst_h = dst->height - dst_y;
+
+ if (src_w <= 0 || src_h <= 0 || dst_w <= 0 || dst_h <= 0) {
+ fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
+ return -EINVAL;
+ }
+
+ if (g2d_check_space(ctx, 12 + scale * 3 + negative + repeat_pad, 2))
+ return -ENOSPC;
+
+ g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
+ g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
+ g2d_add_base_addr(ctx, dst, g2d_dst);
+ g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
+
+ g2d_add_cmd(ctx, SRC_SELECT_REG, G2D_SELECT_MODE_NORMAL);
+ g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
+
+ g2d_add_cmd(ctx, SRC_REPEAT_MODE_REG, src->repeat_mode);
+ if (repeat_pad)
+ g2d_add_cmd(ctx, SRC_PAD_VALUE_REG, dst->color);
+
+ g2d_add_base_addr(ctx, src, g2d_src);
+ g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
+
+ rop4.val = 0;
+ rop4.data.unmasked_rop3 = G2D_ROP3_SRC;
+
+ if (negative) {
+ g2d_add_cmd(ctx, BG_COLOR_REG, 0x00FFFFFF);
+ rop4.data.unmasked_rop3 ^= G2D_ROP3_DST;
+ }
+
+ g2d_add_cmd(ctx, ROP4_REG, rop4.val);
+
+ if (scale) {
+ g2d_add_cmd(ctx, SRC_SCALE_CTRL_REG, G2D_SCALE_MODE_BILINEAR);
+ g2d_add_cmd(ctx, SRC_XSCALE_REG, scale_x);
+ g2d_add_cmd(ctx, SRC_YSCALE_REG, scale_y);
+ }
+
+ pt.data.x = src_x;
+ pt.data.y = src_y;
+ g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
+ pt.data.x = src_x + src_w;
+ pt.data.y = src_y + src_h;
+ g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
+
+ pt.data.x = dst_x;
+ pt.data.y = dst_y;
+ g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
+ pt.data.x = dst_x + dst_w;
+ pt.data.y = dst_y + dst_h;
+ g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
+
+ return g2d_flush(ctx);
+}
+
+/**
+ * g2d_blend - blend image data in source and destination buffers.
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @src: a pointer to g2d_image structure with image and buffer
+ *	information for the source.
+ * @dst: a pointer to g2d_image structure with image and buffer
+ *	information for the destination.
+ * @src_x: x start position in the source buffer.
+ * @src_y: y start position in the source buffer.
+ * @dst_x: x start position in the destination buffer.
+ * @dst_y: y start position in the destination buffer.
+ * @w: width of the region to blend in both buffers.
+ * @h: height of the region to blend in both buffers.
+ * @op: blend operation type.
+ */
+int
+g2d_blend(struct g2d_context *ctx, struct g2d_image *src,
+ struct g2d_image *dst, unsigned int src_x,
+ unsigned int src_y, unsigned int dst_x, unsigned int dst_y,
+ unsigned int w, unsigned int h, enum e_g2d_op op)
+{
+ union g2d_point_val pt;
+ union g2d_bitblt_cmd_val bitblt;
+ union g2d_blend_func_val blend;
+ unsigned int gem_space;
+ unsigned int src_w, src_h, dst_w, dst_h;
+
+ src_w = w;
+ src_h = h;
+ if (src_x + w > src->width)
+ src_w = src->width - src_x;
+ if (src_y + h > src->height)
+ src_h = src->height - src_y;
+
+ dst_w = w;
+ dst_h = h;
+ if (dst_x + w > dst->width)
+ dst_w = dst->width - dst_x;
+ if (dst_y + h > dst->height)
+ dst_h = dst->height - dst_y;
+
+ w = MIN(src_w, dst_w);
+ h = MIN(src_h, dst_h);
+
+ if (w <= 0 || h <= 0) {
+ fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
+ return -EINVAL;
+ }
+
+ if (!g2d_validate_select_mode(src->select_mode)) {
+		fprintf(stderr, MSG_PREFIX "invalid select mode for source.\n");
+ return -EINVAL;
+ }
+
+ if (!g2d_validate_blending_op(op)) {
+		fprintf(stderr, MSG_PREFIX "unsupported blending operation.\n");
+ return -EINVAL;
+ }
+
+ gem_space = src->select_mode == G2D_SELECT_MODE_NORMAL ? 2 : 1;
+
+ if (g2d_check_space(ctx, 12, gem_space))
+ return -ENOSPC;
+
+ bitblt.val = 0;
+ blend.val = 0;
+
+ if (op == G2D_OP_SRC || op == G2D_OP_CLEAR)
+ g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
+ else
+ g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_NORMAL);
+
+ g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
+ g2d_add_base_addr(ctx, dst, g2d_dst);
+ g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
+
+ g2d_add_cmd(ctx, SRC_SELECT_REG, src->select_mode);
+ g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
+
+ switch (src->select_mode) {
+ case G2D_SELECT_MODE_NORMAL:
+ g2d_add_base_addr(ctx, src, g2d_src);
+ g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
+ break;
+ case G2D_SELECT_MODE_FGCOLOR:
+ g2d_add_cmd(ctx, FG_COLOR_REG, src->color);
+ break;
+ case G2D_SELECT_MODE_BGCOLOR:
+ g2d_add_cmd(ctx, BG_COLOR_REG, src->color);
+ break;
+ }
+
+ bitblt.data.alpha_blend_mode = G2D_ALPHA_BLEND_MODE_ENABLE;
+ blend.val = g2d_get_blend_op(op);
+ g2d_add_cmd(ctx, BITBLT_COMMAND_REG, bitblt.val);
+ g2d_add_cmd(ctx, BLEND_FUNCTION_REG, blend.val);
+
+ pt.data.x = src_x;
+ pt.data.y = src_y;
+ g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
+ pt.data.x = src_x + w;
+ pt.data.y = src_y + h;
+ g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
+
+ pt.data.x = dst_x;
+ pt.data.y = dst_y;
+ g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
+ pt.data.x = dst_x + w;
+ pt.data.y = dst_y + h;
+ g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
+
+ return g2d_flush(ctx);
+}
+
+/**
+ * g2d_scale_and_blend - apply scaling to source buffer and then blend to destination buffer
+ *
+ * @ctx: a pointer to g2d_context structure.
+ * @src: a pointer to g2d_image structure with image and buffer
+ *	information for the source.
+ * @dst: a pointer to g2d_image structure with image and buffer
+ *	information for the destination.
+ * @src_x: x start position in the source buffer.
+ * @src_y: y start position in the source buffer.
+ * @src_w: width of the source region.
+ * @src_h: height of the source region.
+ * @dst_x: x start position in the destination buffer.
+ * @dst_y: y start position in the destination buffer.
+ * @dst_w: width of the destination region.
+ * @dst_h: height of the destination region.
+ * @op: blend operation type.
+ */
+int
+g2d_scale_and_blend(struct g2d_context *ctx, struct g2d_image *src,
+ struct g2d_image *dst, unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h, unsigned int dst_x,
+ unsigned int dst_y, unsigned int dst_w, unsigned int dst_h,
+ enum e_g2d_op op)
+{
+ union g2d_point_val pt;
+ union g2d_bitblt_cmd_val bitblt;
+ union g2d_blend_func_val blend;
+ unsigned int scale, gem_space;
+ unsigned int scale_x, scale_y;
+
+	if (src_x + src_w > src->width)
+		src_w = src->width - src_x;
+	if (src_y + src_h > src->height)
+		src_h = src->height - src_y;
+
+	if (dst_x + dst_w > dst->width)
+		dst_w = dst->width - dst_x;
+	if (dst_y + dst_h > dst->height)
+		dst_h = dst->height - dst_y;
+
+	if (src_w == 0 || src_h == 0 || dst_w == 0 || dst_h == 0) {
+		fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Compute the scaling factors only after clipping, so that they
+	 * describe the rectangles that are actually transferred.
+	 */
+	if (src_w == dst_w && src_h == dst_h)
+		scale = 0;
+	else {
+		scale = 1;
+		scale_x = g2d_get_scaling(src_w, dst_w);
+		scale_y = g2d_get_scaling(src_h, dst_h);
+	}
+
+ if (!g2d_validate_select_mode(src->select_mode)) {
+		fprintf(stderr, MSG_PREFIX "invalid select mode for source.\n");
+ return -EINVAL;
+ }
+
+ if (!g2d_validate_blending_op(op)) {
+		fprintf(stderr, MSG_PREFIX "unsupported blending operation.\n");
+ return -EINVAL;
+ }
+
+ gem_space = src->select_mode == G2D_SELECT_MODE_NORMAL ? 2 : 1;
+
+ if (g2d_check_space(ctx, 12 + scale * 3, gem_space))
+ return -ENOSPC;
+
+ bitblt.val = 0;
+ blend.val = 0;
+
+ if (op == G2D_OP_SRC || op == G2D_OP_CLEAR)
+ g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
+ else
+ g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_NORMAL);
+
+ g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
+ g2d_add_base_addr(ctx, dst, g2d_dst);
+ g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
+
+ g2d_add_cmd(ctx, SRC_SELECT_REG, src->select_mode);
+ g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
+
+ switch (src->select_mode) {
+ case G2D_SELECT_MODE_NORMAL:
+ g2d_add_base_addr(ctx, src, g2d_src);
+ g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
+ break;
+ case G2D_SELECT_MODE_FGCOLOR:
+ g2d_add_cmd(ctx, FG_COLOR_REG, src->color);
+ break;
+ case G2D_SELECT_MODE_BGCOLOR:
+ g2d_add_cmd(ctx, BG_COLOR_REG, src->color);
+ break;
+ }
+
+ if (scale) {
+ g2d_add_cmd(ctx, SRC_SCALE_CTRL_REG, G2D_SCALE_MODE_BILINEAR);
+ g2d_add_cmd(ctx, SRC_XSCALE_REG, scale_x);
+ g2d_add_cmd(ctx, SRC_YSCALE_REG, scale_y);
+ }
+
+ bitblt.data.alpha_blend_mode = G2D_ALPHA_BLEND_MODE_ENABLE;
+ blend.val = g2d_get_blend_op(op);
+ g2d_add_cmd(ctx, BITBLT_COMMAND_REG, bitblt.val);
+ g2d_add_cmd(ctx, BLEND_FUNCTION_REG, blend.val);
+
+ pt.data.x = src_x;
+ pt.data.y = src_y;
+ g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
+ pt.data.x = src_x + src_w;
+ pt.data.y = src_y + src_h;
+ g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
+
+ pt.data.x = dst_x;
+ pt.data.y = dst_y;
+ g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
+ pt.data.x = dst_x + dst_w;
+ pt.data.y = dst_y + dst_h;
+ g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
+
+ return g2d_flush(ctx);
+}
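+
+/*
+ * Example: a minimal, illustrative call sequence for g2d_scale_and_blend().
+ * This is a sketch, not part of the library: it assumes a context created
+ * with g2d_init() and two GEM-backed images; drm_fd, src_gem_handle and
+ * dst_gem_handle are placeholders. The drawing call only queues a command
+ * list; g2d_exec() submits it to the engine.
+ *
+ *	struct g2d_context *ctx = g2d_init(drm_fd);
+ *	struct g2d_image src = { 0 }, dst = { 0 };
+ *
+ *	src.select_mode = G2D_SELECT_MODE_NORMAL;
+ *	src.color_mode = G2D_COLOR_FMT_ARGB8888 | G2D_ORDER_AXRGB;
+ *	src.buf_type = G2D_IMGBUF_GEM;
+ *	src.bo[0] = src_gem_handle;
+ *	src.width = 320; src.height = 240; src.stride = 320 * 4;
+ *
+ *	dst.select_mode = G2D_SELECT_MODE_NORMAL;
+ *	dst.color_mode = G2D_COLOR_FMT_ARGB8888 | G2D_ORDER_AXRGB;
+ *	dst.buf_type = G2D_IMGBUF_GEM;
+ *	dst.bo[0] = dst_gem_handle;
+ *	dst.width = 640; dst.height = 480; dst.stride = 640 * 4;
+ *
+ *	g2d_scale_and_blend(ctx, &src, &dst, 0, 0, 320, 240,
+ *			    0, 0, 640, 480, G2D_OP_OVER);
+ *	g2d_exec(ctx);
+ *	g2d_fini(ctx);
+ */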
diff --git a/chromium/third_party/libdrm/src/exynos/exynos_fimg2d.h b/chromium/third_party/libdrm/src/exynos/exynos_fimg2d.h
new file mode 100644
index 00000000000..a825c6831b3
--- /dev/null
+++ b/chromium/third_party/libdrm/src/exynos/exynos_fimg2d.h
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _FIMG2D_H_
+#define _FIMG2D_H_
+
+#define G2D_PLANE_MAX_NR 2
+
+enum e_g2d_color_mode {
+ /* COLOR FORMAT */
+ G2D_COLOR_FMT_XRGB8888,
+ G2D_COLOR_FMT_ARGB8888,
+ G2D_COLOR_FMT_RGB565,
+ G2D_COLOR_FMT_XRGB1555,
+ G2D_COLOR_FMT_ARGB1555,
+ G2D_COLOR_FMT_XRGB4444,
+ G2D_COLOR_FMT_ARGB4444,
+ G2D_COLOR_FMT_PRGB888,
+ G2D_COLOR_FMT_YCbCr444,
+ G2D_COLOR_FMT_YCbCr422,
+ G2D_COLOR_FMT_YCbCr420,
+ /* alpha 8bit */
+ G2D_COLOR_FMT_A8,
+ /* Luminance 8bit: gray color */
+ G2D_COLOR_FMT_L8,
+ /* alpha 1bit */
+ G2D_COLOR_FMT_A1,
+ /* alpha 4bit */
+ G2D_COLOR_FMT_A4,
+ G2D_COLOR_FMT_MASK, /* VER4.1 */
+
+ /* COLOR ORDER */
+ G2D_ORDER_AXRGB = (0 << 4), /* VER4.1 */
+ G2D_ORDER_RGBAX = (1 << 4), /* VER4.1 */
+ G2D_ORDER_AXBGR = (2 << 4), /* VER4.1 */
+ G2D_ORDER_BGRAX = (3 << 4), /* VER4.1 */
+ G2D_ORDER_MASK = (3 << 4), /* VER4.1 */
+
+ /* Number of YCbCr plane */
+ G2D_YCbCr_1PLANE = (0 << 8), /* VER4.1 */
+ G2D_YCbCr_2PLANE = (1 << 8), /* VER4.1 */
+ G2D_YCbCr_PLANE_MASK = (3 << 8), /* VER4.1 */
+
+ /* Order in YCbCr */
+ G2D_YCbCr_ORDER_CrY1CbY0 = (0 << 12), /* VER4.1 */
+ G2D_YCbCr_ORDER_CbY1CrY0 = (1 << 12), /* VER4.1 */
+ G2D_YCbCr_ORDER_Y1CrY0Cb = (2 << 12), /* VER4.1 */
+ G2D_YCbCr_ORDER_Y1CbY0Cr = (3 << 12), /* VER4.1 */
+ G2D_YCbCr_ORDER_CrCb = G2D_YCbCr_ORDER_CrY1CbY0, /* VER4.1 */
+ G2D_YCbCr_ORDER_CbCr = G2D_YCbCr_ORDER_CbY1CrY0, /* VER4.1 */
+	G2D_YCbCr_ORDER_MASK = (3 << 12), /* VER4.1 */
+
+ /* CSC */
+ G2D_CSC_601 = (0 << 16), /* VER4.1 */
+ G2D_CSC_709 = (1 << 16), /* VER4.1 */
+ G2D_CSC_MASK = (1 << 16), /* VER4.1 */
+
+ /* Valid value range of YCbCr */
+ G2D_YCbCr_RANGE_NARROW = (0 << 17), /* VER4.1 */
+ G2D_YCbCr_RANGE_WIDE = (1 << 17), /* VER4.1 */
+ G2D_YCbCr_RANGE_MASK = (1 << 17), /* VER4.1 */
+
+ G2D_COLOR_MODE_MASK = 0xFFFFFFFF,
+};
+
+enum e_g2d_select_mode {
+ G2D_SELECT_MODE_NORMAL = (0 << 0),
+ G2D_SELECT_MODE_FGCOLOR = (1 << 0),
+ G2D_SELECT_MODE_BGCOLOR = (2 << 0),
+};
+
+enum e_g2d_repeat_mode {
+ G2D_REPEAT_MODE_REPEAT,
+ G2D_REPEAT_MODE_PAD,
+ G2D_REPEAT_MODE_REFLECT,
+ G2D_REPEAT_MODE_CLAMP,
+ G2D_REPEAT_MODE_NONE,
+};
+
+enum e_g2d_scale_mode {
+ G2D_SCALE_MODE_NONE = 0,
+ G2D_SCALE_MODE_NEAREST,
+ G2D_SCALE_MODE_BILINEAR,
+ G2D_SCALE_MODE_MAX,
+};
+
+enum e_g2d_buf_type {
+ G2D_IMGBUF_COLOR,
+ G2D_IMGBUF_GEM,
+ G2D_IMGBUF_USERPTR,
+};
+
+enum e_g2d_rop3_type {
+ G2D_ROP3_DST = 0xAA,
+ G2D_ROP3_SRC = 0xCC,
+ G2D_ROP3_3RD = 0xF0,
+ G2D_ROP3_MASK = 0xFF,
+};
+
+enum e_g2d_select_alpha_src {
+ G2D_SELECT_SRC_FOR_ALPHA_BLEND, /* VER4.1 */
+ G2D_SELECT_ROP_FOR_ALPHA_BLEND, /* VER4.1 */
+};
+
+enum e_g2d_transparent_mode {
+ G2D_TRANSPARENT_MODE_OPAQUE,
+ G2D_TRANSPARENT_MODE_TRANSPARENT,
+ G2D_TRANSPARENT_MODE_BLUESCREEN,
+ G2D_TRANSPARENT_MODE_MAX,
+};
+
+enum e_g2d_color_key_mode {
+ G2D_COLORKEY_MODE_DISABLE = 0,
+ G2D_COLORKEY_MODE_SRC_RGBA = (1<<0),
+ G2D_COLORKEY_MODE_DST_RGBA = (1<<1),
+ G2D_COLORKEY_MODE_SRC_YCbCr = (1<<2), /* VER4.1 */
+ G2D_COLORKEY_MODE_DST_YCbCr = (1<<3), /* VER4.1 */
+ G2D_COLORKEY_MODE_MASK = 15,
+};
+
+enum e_g2d_alpha_blend_mode {
+ G2D_ALPHA_BLEND_MODE_DISABLE,
+ G2D_ALPHA_BLEND_MODE_ENABLE,
+ G2D_ALPHA_BLEND_MODE_FADING, /* VER3.0 */
+ G2D_ALPHA_BLEND_MODE_MAX,
+};
+
+enum e_g2d_op {
+ G2D_OP_CLEAR = 0x00,
+ G2D_OP_SRC = 0x01,
+ G2D_OP_DST = 0x02,
+ G2D_OP_OVER = 0x03,
+ G2D_OP_INTERPOLATE = 0x04,
+ G2D_OP_DISJOINT_CLEAR = 0x10,
+ G2D_OP_DISJOINT_SRC = 0x11,
+ G2D_OP_DISJOINT_DST = 0x12,
+ G2D_OP_CONJOINT_CLEAR = 0x20,
+ G2D_OP_CONJOINT_SRC = 0x21,
+ G2D_OP_CONJOINT_DST = 0x22,
+};
+
+/*
+ * The G2D_COEFF_MODE_DST_{COLOR,ALPHA} modes both use the ALPHA_REG(0x618)
+ * register. The register's fields are as follows:
+ * bits 31:8 = color value (RGB order)
+ * bits 7:0 = alpha value
+ */
+enum e_g2d_coeff_mode {
+ G2D_COEFF_MODE_ONE,
+ G2D_COEFF_MODE_ZERO,
+ G2D_COEFF_MODE_SRC_ALPHA,
+ G2D_COEFF_MODE_SRC_COLOR,
+ G2D_COEFF_MODE_DST_ALPHA,
+ G2D_COEFF_MODE_DST_COLOR,
+ /* Global Alpha : Set by ALPHA_REG(0x618) */
+ G2D_COEFF_MODE_GB_ALPHA,
+ /* Global Color and Alpha : Set by ALPHA_REG(0x618) */
+ G2D_COEFF_MODE_GB_COLOR,
+ /* (1-SRC alpha)/DST Alpha */
+ G2D_COEFF_MODE_DISJOINT_S,
+ /* (1-DST alpha)/SRC Alpha */
+ G2D_COEFF_MODE_DISJOINT_D,
+ /* SRC alpha/DST alpha */
+ G2D_COEFF_MODE_CONJOINT_S,
+ /* DST alpha/SRC alpha */
+ G2D_COEFF_MODE_CONJOINT_D,
+ G2D_COEFF_MODE_MASK
+};
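+
+/*
+ * Illustrative sketch (an assumption based on the layout described above,
+ * not an API of this header): packing an ALPHA_REG(0x618) value with the
+ * color in bits 31:8 and the alpha in bits 7:0.
+ *
+ *	static inline unsigned int g2d_pack_alpha_reg(unsigned int rgb,
+ *						      unsigned char alpha)
+ *	{
+ *		return ((rgb & 0xffffff) << 8) | alpha;
+ *	}
+ */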
+
+enum e_g2d_acoeff_mode {
+ G2D_ACOEFF_MODE_A, /* alpha */
+ G2D_ACOEFF_MODE_APGA, /* alpha + global alpha */
+ G2D_ACOEFF_MODE_AMGA, /* alpha * global alpha */
+ G2D_ACOEFF_MODE_MASK
+};
+
+union g2d_point_val {
+ unsigned int val;
+ struct {
+ /*
+		 * X Coordinate of Source Image
+		 * Range: 0 ~ 8000 (Requirement: SrcLeftX < SrcRightX)
+		 * Must be even in YCbCr 4:2:2 and YCbCr 4:2:0 formats.
+ */
+ unsigned int x:16;
+ /*
+ * Y Coordinate of Source Image
+ * Range: 0 ~ 8000 (Requirement: SrcTopY < SrcBottomY)
+		 * Must be even in YCbCr 4:2:0 format.
+ */
+ unsigned int y:16;
+ } data;
+};
+
+union g2d_rop4_val {
+ unsigned int val;
+ struct {
+ enum e_g2d_rop3_type unmasked_rop3:8;
+ enum e_g2d_rop3_type masked_rop3:8;
+ unsigned int reserved:16;
+ } data;
+};
+
+union g2d_bitblt_cmd_val {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int mask_rop4_en:1;
+ unsigned int masking_en:1;
+ enum e_g2d_select_alpha_src rop4_alpha_en:1;
+ unsigned int dither_en:1;
+ /* [4:7] */
+		unsigned int reserved1:4;
+ /* [8:11] */
+ unsigned int cw_en:4;
+ /* [12:15] */
+ enum e_g2d_transparent_mode transparent_mode:4;
+ /* [16:19] */
+ enum e_g2d_color_key_mode color_key_mode:4;
+ /* [20:23] */
+ enum e_g2d_alpha_blend_mode alpha_blend_mode:4;
+ /* [24:27] */
+ unsigned int src_pre_multiply:1;
+ unsigned int pat_pre_multiply:1;
+ unsigned int dst_pre_multiply:1;
+ unsigned int dst_depre_multiply:1;
+ /* [28:31] */
+ unsigned int fast_solid_color_fill_en:1;
+ unsigned int reserved:3;
+ } data;
+};
+
+union g2d_blend_func_val {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ enum e_g2d_coeff_mode src_coeff:4;
+ enum e_g2d_acoeff_mode src_coeff_src_a:2;
+ enum e_g2d_acoeff_mode src_coeff_dst_a:2;
+ enum e_g2d_coeff_mode dst_coeff:4;
+ enum e_g2d_acoeff_mode dst_coeff_src_a:2;
+ enum e_g2d_acoeff_mode dst_coeff_dst_a:2;
+ /* [16:19] */
+ unsigned int inv_src_color_coeff:1;
+		unsigned int reserved1:1;
+		unsigned int inv_dst_color_coeff:1;
+		unsigned int reserved2:1;
+ /* [20:23] */
+ unsigned int lighten_en:1;
+ unsigned int darken_en:1;
+ unsigned int win_ce_src_over_en:2;
+ /* [24:31] */
+ unsigned int reserved:8;
+ } data;
+};
+
+struct g2d_image {
+ enum e_g2d_select_mode select_mode;
+ enum e_g2d_color_mode color_mode;
+ enum e_g2d_repeat_mode repeat_mode;
+ enum e_g2d_scale_mode scale_mode;
+ unsigned int xscale;
+ unsigned int yscale;
+ unsigned char rotate_90;
+ unsigned char x_dir;
+ unsigned char y_dir;
+ unsigned char component_alpha;
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+ unsigned int need_free;
+ unsigned int color;
+ enum e_g2d_buf_type buf_type;
+ unsigned int bo[G2D_PLANE_MAX_NR];
+ struct drm_exynos_g2d_userptr user_ptr[G2D_PLANE_MAX_NR];
+ void *mapped_ptr[G2D_PLANE_MAX_NR];
+};
+
+struct g2d_context;
+
+struct g2d_context *g2d_init(int fd);
+void g2d_fini(struct g2d_context *ctx);
+void g2d_config_event(struct g2d_context *ctx, void *userdata);
+int g2d_exec(struct g2d_context *ctx);
+int g2d_solid_fill(struct g2d_context *ctx, struct g2d_image *img,
+ unsigned int x, unsigned int y, unsigned int w,
+ unsigned int h);
+int g2d_copy(struct g2d_context *ctx, struct g2d_image *src,
+ struct g2d_image *dst, unsigned int src_x,
+ unsigned int src_y, unsigned int dst_x, unsigned int dst_y,
+ unsigned int w, unsigned int h);
+int g2d_move(struct g2d_context *ctx, struct g2d_image *img,
+ unsigned int src_x, unsigned int src_y, unsigned int dst_x,
+	     unsigned int dst_y, unsigned int w, unsigned int h);
+int g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
+ struct g2d_image *dst, unsigned int src_x,
+ unsigned int src_y, unsigned int src_w,
+ unsigned int src_h, unsigned int dst_x,
+ unsigned int dst_y, unsigned int dst_w,
+ unsigned int dst_h, unsigned int negative);
+int g2d_blend(struct g2d_context *ctx, struct g2d_image *src,
+ struct g2d_image *dst, unsigned int src_x,
+ unsigned int src_y, unsigned int dst_x, unsigned int dst_y,
+ unsigned int w, unsigned int h, enum e_g2d_op op);
+int g2d_scale_and_blend(struct g2d_context *ctx, struct g2d_image *src,
+ struct g2d_image *dst, unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h, unsigned int dst_x,
+ unsigned int dst_y, unsigned int dst_w, unsigned int dst_h,
+ enum e_g2d_op op);
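+
+/*
+ * Minimal usage sketch, assuming an already-open Exynos DRM fd and a
+ * GEM-backed buffer; drm_fd and gem_handle are placeholders. Drawing
+ * calls queue commands; g2d_exec() submits them.
+ *
+ *	struct g2d_context *ctx = g2d_init(drm_fd);
+ *	struct g2d_image img = { 0 };
+ *
+ *	img.buf_type = G2D_IMGBUF_GEM;
+ *	img.bo[0] = gem_handle;
+ *	img.width = 640; img.height = 480; img.stride = 640 * 4;
+ *	img.color_mode = G2D_COLOR_FMT_ARGB8888 | G2D_ORDER_AXRGB;
+ *	img.color = 0xff0000ff;
+ *
+ *	g2d_solid_fill(ctx, &img, 0, 0, img.width, img.height);
+ *	g2d_exec(ctx);
+ *	g2d_fini(ctx);
+ */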
+#endif /* _FIMG2D_H_ */
diff --git a/chromium/third_party/libdrm/src/exynos/fimg2d_reg.h b/chromium/third_party/libdrm/src/exynos/fimg2d_reg.h
new file mode 100644
index 00000000000..07dd6349dd1
--- /dev/null
+++ b/chromium/third_party/libdrm/src/exynos/fimg2d_reg.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _FIMG2D_REG_H_
+#define _FIMG2D_REG_H_
+
+#define SOFT_RESET_REG (0x0000)
+#define INTEN_REG (0x0004)
+#define INTC_PEND_REG (0x000C)
+#define FIFO_STAT_REG (0x0010)
+#define AXI_MODE_REG (0x001C)
+#define DMA_SFR_BASE_ADDR_REG (0x0080)
+#define DMA_COMMAND_REG (0x0084)
+#define DMA_EXE_LIST_NUM_REG (0x0088)
+#define DMA_STATUS_REG (0x008C)
+#define DMA_HOLD_CMD_REG (0x0090)
+
+/* COMMAND REGISTER */
+#define BITBLT_START_REG (0x0100)
+#define BITBLT_COMMAND_REG (0x0104)
+#define BLEND_FUNCTION_REG (0x0108) /* VER4.1 */
+#define ROUND_MODE_REG (0x010C) /* VER4.1 */
+
+/* PARAMETER SETTING REGISTER */
+#define ROTATE_REG (0x0200)
+#define SRC_MASK_DIRECT_REG (0x0204)
+#define DST_PAT_DIRECT_REG (0x0208)
+
+/* SOURCE */
+#define SRC_SELECT_REG (0x0300)
+#define SRC_BASE_ADDR_REG (0x0304)
+#define SRC_STRIDE_REG (0x0308)
+#define SRC_COLOR_MODE_REG (0x030c)
+#define SRC_LEFT_TOP_REG (0x0310)
+#define SRC_RIGHT_BOTTOM_REG (0x0314)
+#define SRC_PLANE2_BASE_ADDR_REG (0x0318) /* VER4.1 */
+#define SRC_REPEAT_MODE_REG (0x031C)
+#define SRC_PAD_VALUE_REG (0x0320)
+#define SRC_A8_RGB_EXT_REG (0x0324)
+#define SRC_SCALE_CTRL_REG (0x0328)
+#define SRC_XSCALE_REG (0x032C)
+#define SRC_YSCALE_REG (0x0330)
+
+/* DESTINATION */
+#define DST_SELECT_REG (0x0400)
+#define DST_BASE_ADDR_REG (0x0404)
+#define DST_STRIDE_REG (0x0408)
+#define DST_COLOR_MODE_REG (0x040C)
+#define DST_LEFT_TOP_REG (0x0410)
+#define DST_RIGHT_BOTTOM_REG (0x0414)
+#define DST_PLANE2_BASE_ADDR_REG (0x0418) /* VER4.1 */
+#define DST_A8_RGB_EXT_REG (0x041C)
+
+/* PATTERN */
+#define PAT_BASE_ADDR_REG (0x0500)
+#define PAT_SIZE_REG (0x0504)
+#define PAT_COLOR_MODE_REG (0x0508)
+#define PAT_OFFSET_REG (0x050C)
+#define PAT_STRIDE_REG (0x0510)
+
+/* MASK */
+#define MASK_BASE_ADDR_REG (0x0520)
+#define MASK_STRIDE_REG (0x0524)
+#define MASK_LEFT_TOP_REG (0x0528) /* VER4.1 */
+#define MASK_RIGHT_BOTTOM_REG (0x052C) /* VER4.1 */
+#define MASK_MODE_REG (0x0530) /* VER4.1 */
+#define MASK_REPEAT_MODE_REG (0x0534)
+#define MASK_PAD_VALUE_REG (0x0538)
+#define MASK_SCALE_CTRL_REG (0x053C)
+#define MASK_XSCALE_REG (0x0540)
+#define MASK_YSCALE_REG (0x0544)
+
+/* CLIPPING WINDOW */
+#define CW_LT_REG (0x0600)
+#define CW_RB_REG (0x0604)
+
+/* ROP & ALPHA SETTING */
+#define THIRD_OPERAND_REG (0x0610)
+#define ROP4_REG (0x0614)
+#define ALPHA_REG (0x0618)
+
+/* COLOR SETTING */
+#define FG_COLOR_REG (0x0700)
+#define BG_COLOR_REG (0x0704)
+#define BS_COLOR_REG (0x0708)
+#define SF_COLOR_REG (0x070C) /* VER4.1 */
+
+/* COLOR KEY */
+#define SRC_COLORKEY_CTRL_REG (0x0710)
+#define SRC_COLORKEY_DR_MIN_REG (0x0714)
+#define SRC_COLORKEY_DR_MAX_REG (0x0718)
+#define DST_COLORKEY_CTRL_REG (0x071C)
+#define DST_COLORKEY_DR_MIN_REG (0x0720)
+#define DST_COLORKEY_DR_MAX_REG (0x0724)
+/* YCbCr src Color Key */
+#define YCbCr_SRC_COLORKEY_CTRL_REG (0x0728) /* VER4.1 */
+#define YCbCr_SRC_COLORKEY_DR_MIN_REG (0x072C) /* VER4.1 */
+#define YCbCr_SRC_COLORKEY_DR_MAX_REG (0x0730) /* VER4.1 */
+/*Y CbCr dst Color Key */
+#define YCbCr_DST_COLORKEY_CTRL_REG (0x0734) /* VER4.1 */
+#define YCbCr_DST_COLORKEY_DR_MIN_REG (0x0738) /* VER4.1 */
+#define YCbCr_DST_COLORKEY_DR_MAX_REG (0x073C) /* VER4.1 */
+
+#endif
+
diff --git a/chromium/third_party/libdrm/src/exynos/libdrm_exynos.pc.in b/chromium/third_party/libdrm/src/exynos/libdrm_exynos.pc.in
new file mode 100644
index 00000000000..ff1c43203d0
--- /dev/null
+++ b/chromium/third_party/libdrm/src/exynos/libdrm_exynos.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_exynos
+Description: Userspace interface to exynos kernel DRM services
+Version: 0.7
+Libs: -L${libdir} -ldrm_exynos
+Cflags: -I${includedir} -I${includedir}/libdrm -I${includedir}/exynos
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/freedreno/Android.mk b/chromium/third_party/libdrm/src/freedreno/Android.mk
new file mode 100644
index 00000000000..fba48f2f087
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/Android.mk
@@ -0,0 +1,18 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Import variables LIBDRM_FREEDRENO_FILES, LIBDRM_FREEDRENO_H_FILES
+include $(LOCAL_PATH)/Makefile.sources
+
+LOCAL_MODULE := libdrm_freedreno
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES := $(patsubst %.h, , $(LIBDRM_FREEDRENO_FILES))
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+
+LOCAL_CFLAGS := \
+ -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/chromium/third_party/libdrm/src/freedreno/Makefile.am b/chromium/third_party/libdrm/src/freedreno/Makefile.am
new file mode 100644
index 00000000000..0771d146b95
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/Makefile.am
@@ -0,0 +1,30 @@
+AUTOMAKE_OPTIONS=subdir-objects
+include Makefile.sources
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ $(PTHREADSTUBS_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_freedreno_la_LTLIBRARIES = libdrm_freedreno.la
+libdrm_freedreno_ladir = $(libdir)
+libdrm_freedreno_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libdrm_freedreno_la_LIBADD = \
+ ../libdrm.la \
+ @PTHREADSTUBS_LIBS@ \
+ @CLOCK_LIB@
+
+libdrm_freedreno_la_SOURCES = $(LIBDRM_FREEDRENO_FILES)
+if HAVE_FREEDRENO_KGSL
+libdrm_freedreno_la_SOURCES += $(LIBDRM_FREEDRENO_KGSL_FILES)
+endif
+
+libdrm_freedrenocommonincludedir = ${includedir}/freedreno
+libdrm_freedrenocommoninclude_HEADERS = $(LIBDRM_FREEDRENO_H_FILES)
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm_freedreno.pc
+
+TESTS = freedreno-symbol-check
+EXTRA_DIST = $(TESTS)
diff --git a/chromium/third_party/libdrm/src/freedreno/Makefile.sources b/chromium/third_party/libdrm/src/freedreno/Makefile.sources
new file mode 100644
index 00000000000..68a679bf631
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/Makefile.sources
@@ -0,0 +1,26 @@
+LIBDRM_FREEDRENO_FILES := \
+ freedreno_device.c \
+ freedreno_pipe.c \
+ freedreno_priv.h \
+ freedreno_ringbuffer.c \
+ freedreno_bo.c \
+ freedreno_bo_cache.c \
+ msm/msm_bo.c \
+ msm/msm_device.c \
+ msm/msm_drm.h \
+ msm/msm_pipe.c \
+ msm/msm_priv.h \
+ msm/msm_ringbuffer.c
+
+LIBDRM_FREEDRENO_KGSL_FILES := \
+ kgsl/kgsl_bo.c \
+ kgsl/kgsl_device.c \
+ kgsl/kgsl_drm.h \
+ kgsl/kgsl_pipe.c \
+ kgsl/kgsl_priv.h \
+ kgsl/kgsl_ringbuffer.c \
+ kgsl/msm_kgsl.h
+
+LIBDRM_FREEDRENO_H_FILES := \
+ freedreno_drmif.h \
+ freedreno_ringbuffer.h
diff --git a/chromium/third_party/libdrm/src/freedreno/freedreno-symbol-check b/chromium/third_party/libdrm/src/freedreno/freedreno-symbol-check
new file mode 100755
index 00000000000..7b31a347001
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/freedreno-symbol-check
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# The following symbols (past the first five, which the linker provides) are
+# taken from the public headers. A list of the latter is available in
+# Makefile.sources (LIBDRM_FREEDRENO_H_FILES).
+
+FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_freedreno.so} | awk '{print $3}' | while read func; do
+( grep -q "^$func$" || echo $func ) <<EOF
+__bss_start
+_edata
+_end
+_fini
+_init
+fd_bo_cpu_fini
+fd_bo_cpu_prep
+fd_bo_del
+fd_bo_dmabuf
+fd_bo_from_dmabuf
+fd_bo_from_fbdev
+fd_bo_from_handle
+fd_bo_from_name
+fd_bo_get_name
+fd_bo_handle
+fd_bo_map
+fd_bo_new
+fd_bo_ref
+fd_bo_size
+fd_device_del
+fd_device_fd
+fd_device_new
+fd_device_new_dup
+fd_device_ref
+fd_device_version
+fd_pipe_del
+fd_pipe_get_param
+fd_pipe_new
+fd_pipe_wait
+fd_pipe_wait_timeout
+fd_ringbuffer_cmd_count
+fd_ringbuffer_del
+fd_ringbuffer_emit_reloc_ring
+fd_ringbuffer_emit_reloc_ring_full
+fd_ringbuffer_flush
+fd_ringbuffer_grow
+fd_ringbuffer_new
+fd_ringbuffer_reloc
+fd_ringbuffer_reset
+fd_ringbuffer_set_parent
+fd_ringbuffer_timestamp
+fd_ringmarker_del
+fd_ringmarker_dwords
+fd_ringmarker_flush
+fd_ringmarker_mark
+fd_ringmarker_new
+EOF
+done)
+
+test ! -n "$FUNCS" || echo $FUNCS
+test ! -n "$FUNCS"
diff --git a/chromium/third_party/libdrm/src/freedreno/freedreno_bo.c b/chromium/third_party/libdrm/src/freedreno/freedreno_bo.c
new file mode 100644
index 00000000000..996d6b95c0c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/freedreno_bo.c
@@ -0,0 +1,323 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "freedreno_drmif.h"
+#include "freedreno_priv.h"
+
+drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
+drm_private void bo_del(struct fd_bo *bo);
+
+/* set buffer name, and add to table, call w/ table_lock held: */
+static void set_name(struct fd_bo *bo, uint32_t name)
+{
+ bo->name = name;
+ /* add ourself into the handle table: */
+ drmHashInsert(bo->dev->name_table, name, bo);
+}
+
+/* lookup a buffer, call w/ table_lock held: */
+static struct fd_bo * lookup_bo(void *tbl, uint32_t key)
+{
+ struct fd_bo *bo = NULL;
+ if (!drmHashLookup(tbl, key, (void **)&bo)) {
+ /* found, incr refcnt and return: */
+ bo = fd_bo_ref(bo);
+
+ /* don't break the bucket if this bo was found in one */
+ list_delinit(&bo->list);
+ }
+ return bo;
+}
+
+/* allocate a new buffer object, call w/ table_lock held */
+static struct fd_bo * bo_from_handle(struct fd_device *dev,
+ uint32_t size, uint32_t handle)
+{
+ struct fd_bo *bo;
+
+ bo = dev->funcs->bo_from_handle(dev, size, handle);
+ if (!bo) {
+ struct drm_gem_close req = {
+ .handle = handle,
+ };
+ drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ return NULL;
+ }
+ bo->dev = fd_device_ref(dev);
+ bo->size = size;
+ bo->handle = handle;
+ atomic_set(&bo->refcnt, 1);
+ list_inithead(&bo->list);
+ /* add ourself into the handle table: */
+ drmHashInsert(dev->handle_table, handle, bo);
+ return bo;
+}
+
+struct fd_bo *
+fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
+{
+ struct fd_bo *bo = NULL;
+ uint32_t handle;
+ int ret;
+
+ bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
+ if (bo)
+ return bo;
+
+ ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
+ if (ret)
+ return NULL;
+
+ pthread_mutex_lock(&table_lock);
+	bo = bo_from_handle(dev, size, handle);
+	if (bo)
+		bo->bo_reuse = TRUE;
+ pthread_mutex_unlock(&table_lock);
+
+ return bo;
+}
+
+struct fd_bo *
+fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
+{
+ struct fd_bo *bo = NULL;
+
+ pthread_mutex_lock(&table_lock);
+
+ bo = lookup_bo(dev->handle_table, handle);
+ if (bo)
+ goto out_unlock;
+
+ bo = bo_from_handle(dev, size, handle);
+
+out_unlock:
+ pthread_mutex_unlock(&table_lock);
+
+ return bo;
+}
+
+struct fd_bo *
+fd_bo_from_dmabuf(struct fd_device *dev, int fd)
+{
+ int ret, size;
+ uint32_t handle;
+ struct fd_bo *bo;
+
+ pthread_mutex_lock(&table_lock);
+ ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
+	if (ret) {
+		pthread_mutex_unlock(&table_lock);
+		return NULL;
+	}
+
+ bo = lookup_bo(dev->handle_table, handle);
+ if (bo)
+ goto out_unlock;
+
+ /* lseek() to get bo size */
+ size = lseek(fd, 0, SEEK_END);
+ lseek(fd, 0, SEEK_CUR);
+
+ bo = bo_from_handle(dev, size, handle);
+
+out_unlock:
+ pthread_mutex_unlock(&table_lock);
+
+ return bo;
+}
+
+struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
+{
+ struct drm_gem_open req = {
+ .name = name,
+ };
+ struct fd_bo *bo;
+
+ pthread_mutex_lock(&table_lock);
+
+ /* check name table first, to see if bo is already open: */
+ bo = lookup_bo(dev->name_table, name);
+ if (bo)
+ goto out_unlock;
+
+ if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
+ ERROR_MSG("gem-open failed: %s", strerror(errno));
+ goto out_unlock;
+ }
+
+ bo = lookup_bo(dev->handle_table, req.handle);
+ if (bo)
+ goto out_unlock;
+
+ bo = bo_from_handle(dev, req.size, req.handle);
+ if (bo)
+ set_name(bo, name);
+
+out_unlock:
+ pthread_mutex_unlock(&table_lock);
+
+ return bo;
+}
+
+struct fd_bo * fd_bo_ref(struct fd_bo *bo)
+{
+ atomic_inc(&bo->refcnt);
+ return bo;
+}
+
+void fd_bo_del(struct fd_bo *bo)
+{
+ struct fd_device *dev = bo->dev;
+
+ if (!atomic_dec_and_test(&bo->refcnt))
+ return;
+
+ pthread_mutex_lock(&table_lock);
+
+ if (bo->bo_reuse && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
+ goto out;
+
+ bo_del(bo);
+ fd_device_del_locked(dev);
+out:
+ pthread_mutex_unlock(&table_lock);
+}
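+
+/* Illustrative lifecycle sketch (dev is a placeholder fd_device): allocate,
+ * map, use and release a buffer. fd_bo_del() only drops the reference;
+ * reusable bo's are parked in the device's bucket cache rather than
+ * destroyed immediately.
+ *
+ *	struct fd_bo *bo = fd_bo_new(dev, 0x1000, DRM_FREEDRENO_GEM_TYPE_KMEM);
+ *	uint32_t *map = fd_bo_map(bo);
+ *	if (map)
+ *		map[0] = 0xdeadbeef;
+ *	fd_bo_del(bo);
+ */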
+
+/* Called under table_lock */
+drm_private void bo_del(struct fd_bo *bo)
+{
+ if (bo->map)
+ drm_munmap(bo->map, bo->size);
+
+ /* TODO probably bo's in bucket list get removed from
+ * handle table??
+ */
+
+ if (bo->handle) {
+ struct drm_gem_close req = {
+ .handle = bo->handle,
+ };
+ drmHashDelete(bo->dev->handle_table, bo->handle);
+ if (bo->name)
+ drmHashDelete(bo->dev->name_table, bo->name);
+ drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ }
+
+ bo->funcs->destroy(bo);
+}
+
+int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
+{
+ if (!bo->name) {
+ struct drm_gem_flink req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
+ if (ret) {
+ return ret;
+ }
+
+ pthread_mutex_lock(&table_lock);
+ set_name(bo, req.name);
+ pthread_mutex_unlock(&table_lock);
+ bo->bo_reuse = FALSE;
+ }
+
+ *name = bo->name;
+
+ return 0;
+}
+
+uint32_t fd_bo_handle(struct fd_bo *bo)
+{
+ return bo->handle;
+}
+
+int fd_bo_dmabuf(struct fd_bo *bo)
+{
+ int ret, prime_fd;
+
+ ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
+ &prime_fd);
+ if (ret) {
+ ERROR_MSG("failed to get dmabuf fd: %d", ret);
+ return ret;
+ }
+
+ bo->bo_reuse = FALSE;
+
+ return prime_fd;
+}
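+
+/* Illustrative sketch: exporting a bo as a dma-buf fd and importing it back
+ * (typically in another process). Exporting clears bo_reuse, since the
+ * buffer may outlive our reference on the other side of the fd.
+ *
+ *	int prime_fd = fd_bo_dmabuf(bo);
+ *	if (prime_fd >= 0) {
+ *		struct fd_bo *imported = fd_bo_from_dmabuf(dev, prime_fd);
+ *		...
+ *		fd_bo_del(imported);
+ *		close(prime_fd);
+ *	}
+ */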
+
+uint32_t fd_bo_size(struct fd_bo *bo)
+{
+ return bo->size;
+}
+
+void * fd_bo_map(struct fd_bo *bo)
+{
+ if (!bo->map) {
+ uint64_t offset;
+ int ret;
+
+ ret = bo->funcs->offset(bo, &offset);
+ if (ret) {
+ return NULL;
+ }
+
+ bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ bo->dev->fd, offset);
+ if (bo->map == MAP_FAILED) {
+ ERROR_MSG("mmap failed: %s", strerror(errno));
+ bo->map = NULL;
+ }
+ }
+ return bo->map;
+}
+
+/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
+int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
+{
+ return bo->funcs->cpu_prep(bo, pipe, op);
+}
+
+void fd_bo_cpu_fini(struct fd_bo *bo)
+{
+ bo->funcs->cpu_fini(bo);
+}
+
+#ifndef HAVE_FREEDRENO_KGSL
+struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
+{
+ return NULL;
+}
+#endif
diff --git a/chromium/third_party/libdrm/src/freedreno/freedreno_bo_cache.c b/chromium/third_party/libdrm/src/freedreno/freedreno_bo_cache.c
new file mode 100644
index 00000000000..7becb0d6410
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/freedreno_bo_cache.c
@@ -0,0 +1,222 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "freedreno_drmif.h"
+#include "freedreno_priv.h"
+
+
+drm_private void bo_del(struct fd_bo *bo);
+drm_private extern pthread_mutex_t table_lock;
+
+static void
+add_bucket(struct fd_bo_cache *cache, int size)
+{
+ unsigned int i = cache->num_buckets;
+
+ assert(i < ARRAY_SIZE(cache->cache_bucket));
+
+ list_inithead(&cache->cache_bucket[i].list);
+ cache->cache_bucket[i].size = size;
+ cache->num_buckets++;
+}
+
+/**
+ * @coarse: if true, use only power-of-two bucket sizes; otherwise
+ * fill in intermediate sizes for a smoother size curve.
+ */
+drm_private void
+fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
+{
+ unsigned long size, cache_max_size = 64 * 1024 * 1024;
+
+ /* OK, so power of two buckets was too wasteful of memory.
+ * Give 3 other sizes between each power of two, to hopefully
+ * cover things accurately enough. (The alternative is
+ * probably to just go for exact matching of sizes, and assume
+ * that for things like composited window resize the tiled
+ * width/height alignment and rounding of sizes to pages will
+ * get us useful cache hit rates anyway)
+ */
+ add_bucket(cache, 4096);
+ add_bucket(cache, 4096 * 2);
+	if (!coarse)
+ add_bucket(cache, 4096 * 3);
+
+ /* Initialize the linked lists for BO reuse cache. */
+ for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
+ add_bucket(cache, size);
+		if (!coarse) {
+ add_bucket(cache, size + size * 1 / 4);
+ add_bucket(cache, size + size * 2 / 4);
+ add_bucket(cache, size + size * 3 / 4);
+ }
+ }
+}
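+
+/* With coarse=0 the buckets created above are 4k, 8k and 12k, then from 16k
+ * upward each power of two plus three evenly spaced intermediate sizes
+ * (16k, 20k, 24k, 28k, 32k, 40k, ...), with the last power-of-two step
+ * at 64MB.
+ */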
+
+/* Frees older cached buffers. Called under table_lock */
+drm_private void
+fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
+{
+ int i;
+
+ if (cache->time == time)
+ return;
+
+ for (i = 0; i < cache->num_buckets; i++) {
+ struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
+ struct fd_bo *bo;
+
+ while (!LIST_IS_EMPTY(&bucket->list)) {
+ bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
+
+ /* keep things in cache for at least 1 second: */
+ if (time && ((time - bo->free_time) <= 1))
+ break;
+
+ list_del(&bo->list);
+ bo_del(bo);
+ }
+ }
+
+ cache->time = time;
+}
+
+static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
+{
+ int i;
+
+ /* hmm, this is what intel does, but I suppose we could calculate our
+ * way to the correct bucket size rather than looping..
+ */
+ for (i = 0; i < cache->num_buckets; i++) {
+ struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
+ if (bucket->size >= size) {
+ return bucket;
+ }
+ }
+
+ return NULL;
+}
+
+static int is_idle(struct fd_bo *bo)
+{
+ return fd_bo_cpu_prep(bo, NULL,
+ DRM_FREEDRENO_PREP_READ |
+ DRM_FREEDRENO_PREP_WRITE |
+ DRM_FREEDRENO_PREP_NOSYNC) == 0;
+}
+
+static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
+{
+ struct fd_bo *bo = NULL;
+
+ /* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
+ * skip the busy check.. if it is only going to be a render target
+ * then we probably don't need to stall..
+ *
+ * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
+ * (MRU, since likely to be in GPU cache), rather than head (LRU)..
+ */
+ pthread_mutex_lock(&table_lock);
+ if (!LIST_IS_EMPTY(&bucket->list)) {
+ bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
+ /* TODO check for compatible flags? */
+ if (is_idle(bo)) {
+ list_del(&bo->list);
+ } else {
+ bo = NULL;
+ }
+ }
+ pthread_mutex_unlock(&table_lock);
+
+ return bo;
+}
+
+/* NOTE: size is potentially rounded up to bucket size: */
+drm_private struct fd_bo *
+fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
+{
+ struct fd_bo *bo = NULL;
+ struct fd_bo_bucket *bucket;
+
+ *size = ALIGN(*size, 4096);
+ bucket = get_bucket(cache, *size);
+
+ /* see if we can be green and recycle: */
+retry:
+ if (bucket) {
+ *size = bucket->size;
+ bo = find_in_bucket(bucket, flags);
+ if (bo) {
+ if (bo->funcs->madvise(bo, TRUE) <= 0) {
+ /* we've lost the backing pages, delete and try again: */
+ pthread_mutex_lock(&table_lock);
+ bo_del(bo);
+ pthread_mutex_unlock(&table_lock);
+ goto retry;
+ }
+ atomic_set(&bo->refcnt, 1);
+ fd_device_ref(bo->dev);
+ return bo;
+ }
+ }
+
+ return NULL;
+}
+
+drm_private int
+fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
+{
+ struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);
+
+ /* see if we can be green and recycle: */
+ if (bucket) {
+ struct timespec time;
+
+ bo->funcs->madvise(bo, FALSE);
+
+ clock_gettime(CLOCK_MONOTONIC, &time);
+
+ bo->free_time = time.tv_sec;
+ list_addtail(&bo->list, &bucket->list);
+ fd_bo_cache_cleanup(cache, time.tv_sec);
+
+ /* bo's in the bucket cache don't have a ref and
+ * don't hold a ref to the dev:
+ */
+ fd_device_del_locked(bo->dev);
+
+ return 0;
+ }
+
+ return -1;
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/freedreno_device.c b/chromium/third_party/libdrm/src/freedreno/freedreno_device.c
new file mode 100644
index 00000000000..fcbf1402ad6
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/freedreno_device.c
@@ -0,0 +1,147 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "freedreno_drmif.h"
+#include "freedreno_priv.h"
+
+static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
+
+struct fd_device * kgsl_device_new(int fd);
+struct fd_device * msm_device_new(int fd);
+
+struct fd_device * fd_device_new(int fd)
+{
+ struct fd_device *dev;
+ drmVersionPtr version;
+
+ /* figure out if we are kgsl or msm drm driver: */
+ version = drmGetVersion(fd);
+ if (!version) {
+ ERROR_MSG("cannot get version: %s", strerror(errno));
+ return NULL;
+ }
+
+ if (!strcmp(version->name, "msm")) {
+ DEBUG_MSG("msm DRM device");
+ if (version->version_major != 1) {
+ ERROR_MSG("unsupported version: %u.%u.%u", version->version_major,
+ version->version_minor, version->version_patchlevel);
+ dev = NULL;
+ goto out;
+ }
+
+		dev = msm_device_new(fd);
+		if (dev)
+			dev->version = version->version_minor;
+#ifdef HAVE_FREEDRENO_KGSL
+ } else if (!strcmp(version->name, "kgsl")) {
+ DEBUG_MSG("kgsl DRM device");
+ dev = kgsl_device_new(fd);
+#endif
+ } else {
+ ERROR_MSG("unknown device: %s", version->name);
+ dev = NULL;
+ }
+
+out:
+ drmFreeVersion(version);
+
+ if (!dev)
+ return NULL;
+
+ atomic_set(&dev->refcnt, 1);
+ dev->fd = fd;
+ dev->handle_table = drmHashCreate();
+ dev->name_table = drmHashCreate();
+ fd_bo_cache_init(&dev->bo_cache, FALSE);
+
+ return dev;
+}
+
+/* like fd_device_new() but creates its own private dup() of the fd,
+ * which is close()d when the device is finalized.
+ */
+struct fd_device * fd_device_new_dup(int fd)
+{
+ int dup_fd = dup(fd);
+ struct fd_device *dev = fd_device_new(dup_fd);
+ if (dev)
+ dev->closefd = 1;
+ else
+ close(dup_fd);
+ return dev;
+}
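+
+/* Illustrative setup sketch ("/dev/dri/renderD128" is just an example node):
+ * fd_device_new_dup() lets the caller keep ownership of its fd, since the
+ * device holds a private dup that is closed on the final fd_device_del().
+ *
+ *	int fd = open("/dev/dri/renderD128", O_RDWR);
+ *	struct fd_device *dev = fd_device_new_dup(fd);
+ *	...
+ *	fd_device_del(dev);
+ *	close(fd);
+ */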
+
+struct fd_device * fd_device_ref(struct fd_device *dev)
+{
+ atomic_inc(&dev->refcnt);
+ return dev;
+}
+
+static void fd_device_del_impl(struct fd_device *dev)
+{
+ fd_bo_cache_cleanup(&dev->bo_cache, 0);
+ drmHashDestroy(dev->handle_table);
+ drmHashDestroy(dev->name_table);
+ if (dev->closefd)
+ close(dev->fd);
+ dev->funcs->destroy(dev);
+}
+
+drm_private void fd_device_del_locked(struct fd_device *dev)
+{
+ if (!atomic_dec_and_test(&dev->refcnt))
+ return;
+ fd_device_del_impl(dev);
+}
+
+void fd_device_del(struct fd_device *dev)
+{
+ if (!atomic_dec_and_test(&dev->refcnt))
+ return;
+ pthread_mutex_lock(&table_lock);
+ fd_device_del_impl(dev);
+ pthread_mutex_unlock(&table_lock);
+}
+
+int fd_device_fd(struct fd_device *dev)
+{
+ return dev->fd;
+}
+
+enum fd_version fd_device_version(struct fd_device *dev)
+{
+ return dev->version;
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/freedreno_drmif.h b/chromium/third_party/libdrm/src/freedreno/freedreno_drmif.h
new file mode 100644
index 00000000000..2d913e6ca7f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/freedreno_drmif.h
@@ -0,0 +1,132 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifndef FREEDRENO_DRMIF_H_
+#define FREEDRENO_DRMIF_H_
+
+#include <xf86drm.h>
+#include <stdint.h>
+
+#if defined(__GNUC__)
+# define drm_deprecated __attribute__((__deprecated__))
+#else
+# define drm_deprecated
+#endif
+
+/* an empty marker for things that will be deprecated in the future: */
+#define will_be_deprecated
+
+struct fd_bo;
+struct fd_pipe;
+struct fd_device;
+
+enum fd_pipe_id {
+ FD_PIPE_3D = 1,
+ FD_PIPE_2D = 2,
+ /* some devices have two 2d blocks.. not really sure how to
+ * use that yet, so just ignoring the 2nd 2d pipe for now
+ */
+ FD_PIPE_MAX
+};
+
+enum fd_param_id {
+ FD_DEVICE_ID,
+ FD_GMEM_SIZE,
+ FD_GPU_ID,
+ FD_CHIP_ID,
+ FD_MAX_FREQ,
+ FD_TIMESTAMP,
+};
+
+/* bo flags: */
+#define DRM_FREEDRENO_GEM_TYPE_SMI 0x00000001
+#define DRM_FREEDRENO_GEM_TYPE_KMEM 0x00000002
+#define DRM_FREEDRENO_GEM_TYPE_MEM_MASK 0x0000000f
+#define DRM_FREEDRENO_GEM_CACHE_NONE 0x00000000
+#define DRM_FREEDRENO_GEM_CACHE_WCOMBINE 0x00100000
+#define DRM_FREEDRENO_GEM_CACHE_WTHROUGH 0x00200000
+#define DRM_FREEDRENO_GEM_CACHE_WBACK 0x00400000
+#define DRM_FREEDRENO_GEM_CACHE_WBACKWA 0x00800000
+#define DRM_FREEDRENO_GEM_CACHE_MASK 0x00f00000
+#define DRM_FREEDRENO_GEM_GPUREADONLY 0x01000000
+
+/* bo access flags: (keep aligned to MSM_PREP_x) */
+#define DRM_FREEDRENO_PREP_READ 0x01
+#define DRM_FREEDRENO_PREP_WRITE 0x02
+#define DRM_FREEDRENO_PREP_NOSYNC 0x04
+
+/* device functions:
+ */
+
+struct fd_device * fd_device_new(int fd);
+struct fd_device * fd_device_new_dup(int fd);
+struct fd_device * fd_device_ref(struct fd_device *dev);
+void fd_device_del(struct fd_device *dev);
+int fd_device_fd(struct fd_device *dev);
+
+enum fd_version {
+ FD_VERSION_MADVISE = 1, /* kernel supports madvise */
+ FD_VERSION_UNLIMITED_CMDS = 1, /* submits w/ >4 cmd buffers (growable ringbuffer) */
+};
+enum fd_version fd_device_version(struct fd_device *dev);
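+
+/* Illustrative sketch: gate optional behaviour on the kernel version, e.g.
+ * only rely on madvise-backed buffer caching when the kernel supports it:
+ *
+ *	if (fd_device_version(dev) >= FD_VERSION_MADVISE)
+ *		... safe to assume madvise is honoured ...
+ */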
+
+/* pipe functions:
+ */
+
+struct fd_pipe * fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id);
+void fd_pipe_del(struct fd_pipe *pipe);
+int fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
+ uint64_t *value);
+int fd_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp);
+/* timeout in nanosec */
+int fd_pipe_wait_timeout(struct fd_pipe *pipe, uint32_t timestamp,
+ uint64_t timeout);
+
+
+/* buffer-object functions:
+ */
+
+struct fd_bo * fd_bo_new(struct fd_device *dev,
+ uint32_t size, uint32_t flags);
+struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe,
+ int fbfd, uint32_t size);
+struct fd_bo *fd_bo_from_handle(struct fd_device *dev,
+ uint32_t handle, uint32_t size);
+struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name);
+struct fd_bo * fd_bo_from_dmabuf(struct fd_device *dev, int fd);
+struct fd_bo * fd_bo_ref(struct fd_bo *bo);
+void fd_bo_del(struct fd_bo *bo);
+int fd_bo_get_name(struct fd_bo *bo, uint32_t *name);
+uint32_t fd_bo_handle(struct fd_bo *bo);
+int fd_bo_dmabuf(struct fd_bo *bo);
+uint32_t fd_bo_size(struct fd_bo *bo);
+void * fd_bo_map(struct fd_bo *bo);
+int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
+void fd_bo_cpu_fini(struct fd_bo *bo);
+
+#endif /* FREEDRENO_DRMIF_H_ */
diff --git a/chromium/third_party/libdrm/src/freedreno/freedreno_pipe.c b/chromium/third_party/libdrm/src/freedreno/freedreno_pipe.c
new file mode 100644
index 00000000000..4a756d7027c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/freedreno_pipe.c
@@ -0,0 +1,82 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "freedreno_drmif.h"
+#include "freedreno_priv.h"
+
+struct fd_pipe *
+fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
+{
+ struct fd_pipe *pipe = NULL;
+
+	if (id >= FD_PIPE_MAX) {
+ ERROR_MSG("invalid pipe id: %d", id);
+ goto fail;
+ }
+
+ pipe = dev->funcs->pipe_new(dev, id);
+ if (!pipe) {
+ ERROR_MSG("allocation failed");
+ goto fail;
+ }
+
+ pipe->dev = dev;
+ pipe->id = id;
+
+ return pipe;
+fail:
+ if (pipe)
+ fd_pipe_del(pipe);
+ return NULL;
+}
+
+void fd_pipe_del(struct fd_pipe *pipe)
+{
+ pipe->funcs->destroy(pipe);
+}
+
+int fd_pipe_get_param(struct fd_pipe *pipe,
+ enum fd_param_id param, uint64_t *value)
+{
+ return pipe->funcs->get_param(pipe, param, value);
+}
+
+int fd_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp)
+{
+ return fd_pipe_wait_timeout(pipe, timestamp, ~0);
+}
+
+int fd_pipe_wait_timeout(struct fd_pipe *pipe, uint32_t timestamp,
+ uint64_t timeout)
+{
+ return pipe->funcs->wait(pipe, timestamp, timeout);
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/freedreno_priv.h b/chromium/third_party/libdrm/src/freedreno/freedreno_priv.h
new file mode 100644
index 00000000000..cdfdbe8dbd7
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/freedreno_priv.h
@@ -0,0 +1,197 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifndef FREEDRENO_PRIV_H_
+#define FREEDRENO_PRIV_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "xf86atomic.h"
+
+#include "util_double_list.h"
+
+#include "freedreno_drmif.h"
+#include "freedreno_ringbuffer.h"
+#include "drm.h"
+
+#ifndef TRUE
+# define TRUE 1
+#endif
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+struct fd_device_funcs {
+ int (*bo_new_handle)(struct fd_device *dev, uint32_t size,
+ uint32_t flags, uint32_t *handle);
+ struct fd_bo * (*bo_from_handle)(struct fd_device *dev,
+ uint32_t size, uint32_t handle);
+ struct fd_pipe * (*pipe_new)(struct fd_device *dev, enum fd_pipe_id id);
+ void (*destroy)(struct fd_device *dev);
+};
+
+struct fd_bo_bucket {
+ uint32_t size;
+ struct list_head list;
+};
+
+struct fd_bo_cache {
+ struct fd_bo_bucket cache_bucket[14 * 4];
+ int num_buckets;
+ time_t time;
+};
+
+struct fd_device {
+ int fd;
+ enum fd_version version;
+ atomic_t refcnt;
+
+ /* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
+ *
+ * handle_table: maps handle to fd_bo
+ * name_table: maps flink name to fd_bo
+ *
+ * We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
+ * returns a new handle. So we need to figure out if the bo is already
+ * open in the process first, before calling gem-open.
+ */
+ void *handle_table, *name_table;
+
+ const struct fd_device_funcs *funcs;
+
+ struct fd_bo_cache bo_cache;
+
+ int closefd; /* call close(fd) upon destruction */
+};
+
+drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
+drm_private void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
+drm_private struct fd_bo * fd_bo_cache_alloc(struct fd_bo_cache *cache,
+ uint32_t *size, uint32_t flags);
+drm_private int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);
+
+/* for where @table_lock is already held: */
+drm_private void fd_device_del_locked(struct fd_device *dev);
+
+struct fd_pipe_funcs {
+ struct fd_ringbuffer * (*ringbuffer_new)(struct fd_pipe *pipe, uint32_t size);
+ int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value);
+ int (*wait)(struct fd_pipe *pipe, uint32_t timestamp, uint64_t timeout);
+ void (*destroy)(struct fd_pipe *pipe);
+};
+
+struct fd_pipe {
+ struct fd_device *dev;
+ enum fd_pipe_id id;
+ const struct fd_pipe_funcs *funcs;
+};
+
+struct fd_ringmarker {
+ struct fd_ringbuffer *ring;
+ uint32_t *cur;
+};
+
+struct fd_ringbuffer_funcs {
+ void * (*hostptr)(struct fd_ringbuffer *ring);
+ int (*flush)(struct fd_ringbuffer *ring, uint32_t *last_start);
+ void (*grow)(struct fd_ringbuffer *ring, uint32_t size);
+ void (*reset)(struct fd_ringbuffer *ring);
+ void (*emit_reloc)(struct fd_ringbuffer *ring,
+ const struct fd_reloc *reloc);
+ uint32_t (*emit_reloc_ring)(struct fd_ringbuffer *ring,
+ struct fd_ringbuffer *target, uint32_t cmd_idx,
+ uint32_t submit_offset, uint32_t size);
+ uint32_t (*cmd_count)(struct fd_ringbuffer *ring);
+ void (*destroy)(struct fd_ringbuffer *ring);
+};
+
+struct fd_bo_funcs {
+ int (*offset)(struct fd_bo *bo, uint64_t *offset);
+ int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
+ void (*cpu_fini)(struct fd_bo *bo);
+ int (*madvise)(struct fd_bo *bo, int willneed);
+ void (*destroy)(struct fd_bo *bo);
+};
+
+struct fd_bo {
+ struct fd_device *dev;
+ uint32_t size;
+ uint32_t handle;
+ uint32_t name;
+ void *map;
+ atomic_t refcnt;
+ const struct fd_bo_funcs *funcs;
+
+ int bo_reuse;
+ struct list_head list; /* bucket-list entry */
+ time_t free_time; /* time when added to bucket-list */
+};
+
+#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define enable_debug 0 /* TODO make dynamic */
+
+#define INFO_MSG(fmt, ...) \
+ do { drmMsg("[I] "fmt " (%s:%d)\n", \
+ ##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
+#define DEBUG_MSG(fmt, ...) \
+ do if (enable_debug) { drmMsg("[D] "fmt " (%s:%d)\n", \
+ ##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
+#define WARN_MSG(fmt, ...) \
+ do { drmMsg("[W] "fmt " (%s:%d)\n", \
+ ##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
+#define ERROR_MSG(fmt, ...) \
+ do { drmMsg("[E] " fmt " (%s:%d)\n", \
+ ##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
+
+#define U642VOID(x) ((void *)(unsigned long)(x))
+#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
+
+static inline uint32_t
+offset_bytes(void *end, void *start)
+{
+ return ((char *)end) - ((char *)start);
+}
+
+#endif /* FREEDRENO_PRIV_H_ */
diff --git a/chromium/third_party/libdrm/src/freedreno/freedreno_ringbuffer.c b/chromium/third_party/libdrm/src/freedreno/freedreno_ringbuffer.c
new file mode 100644
index 00000000000..22dafb394f2
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/freedreno_ringbuffer.c
@@ -0,0 +1,181 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <assert.h>
+
+#include "freedreno_drmif.h"
+#include "freedreno_priv.h"
+#include "freedreno_ringbuffer.h"
+
+struct fd_ringbuffer *
+fd_ringbuffer_new(struct fd_pipe *pipe, uint32_t size)
+{
+ struct fd_ringbuffer *ring;
+
+ ring = pipe->funcs->ringbuffer_new(pipe, size);
+ if (!ring)
+ return NULL;
+
+ ring->pipe = pipe;
+ ring->start = ring->funcs->hostptr(ring);
+ ring->end = &(ring->start[ring->size/4]);
+
+ ring->cur = ring->last_start = ring->start;
+
+ return ring;
+}
+
+void fd_ringbuffer_del(struct fd_ringbuffer *ring)
+{
+ fd_ringbuffer_reset(ring);
+ ring->funcs->destroy(ring);
+}
+
+/* ringbuffers which are IB targets should set the toplevel rb (ie.
+ * the IB source) as its parent before emitting relocs, to ensure
+ * the bookkeeping works out properly.
+ */
+void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
+ struct fd_ringbuffer *parent)
+{
+ ring->parent = parent;
+}
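+
+/* Illustrative sketch: an IB target ring records the top-level ring as its
+ * parent before relocs are emitted into it, so the bookkeeping lands on the
+ * ring that is eventually flushed:
+ *
+ *	struct fd_ringbuffer *ib = fd_ringbuffer_new(pipe, 0x1000);
+ *	fd_ringbuffer_set_parent(ib, ring);
+ *	... emit commands and relocs into ib ...
+ */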
+
+void fd_ringbuffer_reset(struct fd_ringbuffer *ring)
+{
+ uint32_t *start = ring->start;
+ if (ring->pipe->id == FD_PIPE_2D)
+ start = &ring->start[0x140];
+ ring->cur = ring->last_start = start;
+ if (ring->funcs->reset)
+ ring->funcs->reset(ring);
+}
+
+/* maybe get rid of this and use fd_ringmarker_flush() from DDX too? */
+int fd_ringbuffer_flush(struct fd_ringbuffer *ring)
+{
+ return ring->funcs->flush(ring, ring->last_start);
+}
+
+void fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords)
+{
+ assert(ring->funcs->grow); /* unsupported on kgsl */
+
+ /* there is an upper bound on IB size, which appears to be 0x100000 */
+ if (ring->size < 0x100000)
+ ring->size *= 2;
+
+ ring->funcs->grow(ring, ring->size);
+
+ ring->start = ring->funcs->hostptr(ring);
+ ring->end = &(ring->start[ring->size/4]);
+
+ ring->cur = ring->last_start = ring->start;
+}
+
+uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring)
+{
+ return ring->last_timestamp;
+}
+
+void fd_ringbuffer_reloc(struct fd_ringbuffer *ring,
+ const struct fd_reloc *reloc)
+{
+ ring->funcs->emit_reloc(ring, reloc);
+}
+
+void fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
+ struct fd_ringmarker *target, struct fd_ringmarker *end)
+{
+ uint32_t submit_offset, size;
+
+ assert(target->ring == end->ring);
+
+ submit_offset = offset_bytes(target->cur, target->ring->start);
+ size = offset_bytes(end->cur, target->cur);
+
+ ring->funcs->emit_reloc_ring(ring, target->ring, 0, submit_offset, size);
+}
+
+uint32_t fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
+{
+ if (!ring->funcs->cmd_count)
+ return 1;
+ return ring->funcs->cmd_count(ring);
+}
+
+uint32_t
+fd_ringbuffer_emit_reloc_ring_full(struct fd_ringbuffer *ring,
+ struct fd_ringbuffer *target, uint32_t cmd_idx)
+{
+ uint32_t size = offset_bytes(target->cur, target->start);
+ return ring->funcs->emit_reloc_ring(ring, target, cmd_idx, 0, size);
+}
+
+struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring)
+{
+ struct fd_ringmarker *marker = NULL;
+
+ marker = calloc(1, sizeof(*marker));
+ if (!marker) {
+ ERROR_MSG("allocation failed");
+ return NULL;
+ }
+
+ marker->ring = ring;
+
+ marker->cur = marker->ring->cur;
+
+ return marker;
+}
+
+void fd_ringmarker_del(struct fd_ringmarker *marker)
+{
+ free(marker);
+}
+
+void fd_ringmarker_mark(struct fd_ringmarker *marker)
+{
+ marker->cur = marker->ring->cur;
+}
+
+uint32_t fd_ringmarker_dwords(struct fd_ringmarker *start,
+ struct fd_ringmarker *end)
+{
+ return end->cur - start->cur;
+}
+
+int fd_ringmarker_flush(struct fd_ringmarker *marker)
+{
+ struct fd_ringbuffer *ring = marker->ring;
+ return ring->funcs->flush(ring, marker->cur);
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/freedreno_ringbuffer.h b/chromium/third_party/libdrm/src/freedreno/freedreno_ringbuffer.h
new file mode 100644
index 00000000000..8899b5de4a0
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/freedreno_ringbuffer.h
@@ -0,0 +1,92 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifndef FREEDRENO_RINGBUFFER_H_
+#define FREEDRENO_RINGBUFFER_H_
+
+#include <freedreno_drmif.h>
+
+/* the ringbuffer object is not opaque so that OUT_RING() type stuff
+ * can be inlined. Note that users should not make assumptions about
+ * the size of this struct.. more stuff will be added when we eventually
+ * have a kernel driver that can deal w/ reloc's..
+ */
+
+struct fd_ringbuffer_funcs;
+struct fd_ringmarker;
+
+struct fd_ringbuffer {
+ int size;
+ uint32_t *cur, *end, *start, *last_start;
+ struct fd_pipe *pipe;
+ const struct fd_ringbuffer_funcs *funcs;
+ uint32_t last_timestamp;
+ struct fd_ringbuffer *parent;
+};
+
+struct fd_ringbuffer * fd_ringbuffer_new(struct fd_pipe *pipe,
+ uint32_t size);
+void fd_ringbuffer_del(struct fd_ringbuffer *ring);
+void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
+ struct fd_ringbuffer *parent);
+void fd_ringbuffer_reset(struct fd_ringbuffer *ring);
+int fd_ringbuffer_flush(struct fd_ringbuffer *ring);
+void fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords);
+uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring);
+
+static inline void fd_ringbuffer_emit(struct fd_ringbuffer *ring,
+ uint32_t data)
+{
+ (*ring->cur++) = data;
+}
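+
+/* Illustrative sketch (not part of this header's API): callers typically
+ * wrap fd_ringbuffer_emit() in OUT_RING()-style macros, e.g.:
+ *
+ *   #define OUT_RING(ring, data) fd_ringbuffer_emit((ring), (data))
+ *
+ *   OUT_RING(ring, 0xdeadbeef);   // emit one dword
+ */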
+
+struct fd_reloc {
+ struct fd_bo *bo;
+#define FD_RELOC_READ 0x0001
+#define FD_RELOC_WRITE 0x0002
+ uint32_t flags;
+ uint32_t offset;
+ uint32_t or;
+ int32_t shift;
+};
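+
+/* Illustrative sketch of how a backend applies a reloc (cf. the kgsl
+ * implementation): the bo's gpu address at 'offset' is shifted by
+ * 'shift' (negative means right-shift), or'd with 'or', and written
+ * into the ring:
+ *
+ *   addr = gpuaddr(reloc->bo) + reloc->offset;
+ *   addr = (reloc->shift < 0) ? addr >> -reloc->shift
+ *                             : addr << reloc->shift;
+ *   emit(ring, addr | reloc->or);
+ */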
+
+void fd_ringbuffer_reloc(struct fd_ringbuffer *ring, const struct fd_reloc *reloc);
+will_be_deprecated void fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
+ struct fd_ringmarker *target, struct fd_ringmarker *end);
+uint32_t fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring);
+uint32_t fd_ringbuffer_emit_reloc_ring_full(struct fd_ringbuffer *ring,
+ struct fd_ringbuffer *target, uint32_t cmd_idx);
+
+will_be_deprecated struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring);
+will_be_deprecated void fd_ringmarker_del(struct fd_ringmarker *marker);
+will_be_deprecated void fd_ringmarker_mark(struct fd_ringmarker *marker);
+will_be_deprecated uint32_t fd_ringmarker_dwords(struct fd_ringmarker *start,
+ struct fd_ringmarker *end);
+will_be_deprecated int fd_ringmarker_flush(struct fd_ringmarker *marker);
+
+#endif /* FREEDRENO_RINGBUFFER_H_ */
diff --git a/chromium/third_party/libdrm/src/freedreno/kgsl/README b/chromium/third_party/libdrm/src/freedreno/kgsl/README
new file mode 100644
index 00000000000..56874b42ff2
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/kgsl/README
@@ -0,0 +1,26 @@
+This is a historical description of what is now the kgsl backend
+in libdrm freedreno (before the upstream drm/msm driver). Note
+that the kgsl backend requires the "kgsl-drm" shim driver, which
+is usually in disrepair (QCOM does not build it for Android), and
+due to random differences between downstream Android kernel
+branches it may or may not work. So YMMV.
+
+Original README:
+----------------
+
+Note that the current msm kernel driver is a bit strange. It provides
+a DRM interface for GEM, which is basically sufficient to have DRI2
+working. But it does not provide KMS. And the interface to the 2d and
+3d cores is via separate devices (/dev/kgsl-*). This is not quite how
+I'd write a DRM driver, but at this stage it is useful for
+xf86-video-freedreno and fdre (and the eventual gallium driver) to be
+able to work on the existing kernel driver from QCOM, to allow
+capturing cmdstream dumps from the binary blob drivers without having
+to reboot. So libdrm_freedreno attempts to hide most of the crazy.
+The intention is that when there is a proper kernel driver, it will
+be mostly just changes in libdrm_freedreno to adapt the gallium
+driver and xf86-video-freedreno (ignoring the fbdev->KMS changes).
+
+So don't look at freedreno as an example of how to write a libdrm
+module or a DRM driver.. it is just an attempt to paper over a non-
+standard kernel driver architecture.
diff --git a/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_bo.c b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_bo.c
new file mode 100644
index 00000000000..ab3485e36b1
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_bo.c
@@ -0,0 +1,315 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "kgsl_priv.h"
+
+#include <linux/fb.h>
+
+static int set_memtype(struct fd_device *dev, uint32_t handle, uint32_t flags)
+{
+ struct drm_kgsl_gem_memtype req = {
+ .handle = handle,
+ .type = flags & DRM_FREEDRENO_GEM_TYPE_MEM_MASK,
+ };
+
+ return drmCommandWrite(dev->fd, DRM_KGSL_GEM_SETMEMTYPE,
+ &req, sizeof(req));
+}
+
+static int bo_alloc(struct kgsl_bo *kgsl_bo)
+{
+ struct fd_bo *bo = &kgsl_bo->base;
+ if (!kgsl_bo->offset) {
+ struct drm_kgsl_gem_alloc req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ /* if the buffer is already backed by pages then this
+ * doesn't actually do anything (other than giving us
+ * the offset)
+ */
+ ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_ALLOC,
+ &req, sizeof(req));
+ if (ret) {
+ ERROR_MSG("alloc failed: %s", strerror(errno));
+ return ret;
+ }
+
+ kgsl_bo->offset = req.offset;
+ }
+
+ return 0;
+}
+
+static int kgsl_bo_offset(struct fd_bo *bo, uint64_t *offset)
+{
+ struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
+ int ret = bo_alloc(kgsl_bo);
+ if (ret)
+ return ret;
+ *offset = kgsl_bo->offset;
+ return 0;
+}
+
+static int kgsl_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
+{
+ uint32_t timestamp = kgsl_bo_get_timestamp(to_kgsl_bo(bo));
+
+ if (op & DRM_FREEDRENO_PREP_NOSYNC) {
+ uint32_t current;
+ int ret;
+
+ /* special case for is_idle().. we can't really handle that
+ * properly in kgsl (perhaps we need a way to just disable
+ * the bo-cache for kgsl?)
+ */
+ if (!pipe)
+ return -EBUSY;
+
+ ret = kgsl_pipe_timestamp(to_kgsl_pipe(pipe), &current);
+ if (ret)
+ return ret;
+
+ if (timestamp > current)
+ return -EBUSY;
+
+ return 0;
+ }
+
+ if (timestamp)
+ fd_pipe_wait(pipe, timestamp);
+
+ return 0;
+}
+
+static void kgsl_bo_cpu_fini(struct fd_bo *bo)
+{
+}
+
+static int kgsl_bo_madvise(struct fd_bo *bo, int willneed)
+{
+ return willneed; /* not supported by kgsl */
+}
+
+static void kgsl_bo_destroy(struct fd_bo *bo)
+{
+ struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
+ free(kgsl_bo);
+}
+
+static const struct fd_bo_funcs funcs = {
+ .offset = kgsl_bo_offset,
+ .cpu_prep = kgsl_bo_cpu_prep,
+ .cpu_fini = kgsl_bo_cpu_fini,
+ .madvise = kgsl_bo_madvise,
+ .destroy = kgsl_bo_destroy,
+};
+
+/* allocate a buffer handle: */
+drm_private int kgsl_bo_new_handle(struct fd_device *dev,
+ uint32_t size, uint32_t flags, uint32_t *handle)
+{
+ struct drm_kgsl_gem_create req = {
+ .size = size,
+ };
+ int ret;
+
+ ret = drmCommandWriteRead(dev->fd, DRM_KGSL_GEM_CREATE,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ // TODO make flags match msm driver, since kgsl is legacy..
+ // translate flags in kgsl..
+
+ set_memtype(dev, req.handle, flags);
+
+ *handle = req.handle;
+
+ return 0;
+}
+
+/* allocate a new buffer object */
+drm_private struct fd_bo * kgsl_bo_from_handle(struct fd_device *dev,
+ uint32_t size, uint32_t handle)
+{
+ struct kgsl_bo *kgsl_bo;
+ struct fd_bo *bo;
+ unsigned i;
+
+ kgsl_bo = calloc(1, sizeof(*kgsl_bo));
+ if (!kgsl_bo)
+ return NULL;
+
+ bo = &kgsl_bo->base;
+ bo->funcs = &funcs;
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_bo->list); i++)
+ list_inithead(&kgsl_bo->list[i]);
+
+ return bo;
+}
+
+struct fd_bo *
+fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
+{
+ struct fd_bo *bo;
+
+ if (!is_kgsl_pipe(pipe))
+ return NULL;
+
+ bo = fd_bo_new(pipe->dev, 1, 0);
+
+ /* this is fugly, but works around a bug in the kernel..
+ * priv->memdesc.size never gets set, so the getbufinfo ioctl
+ * thinks the buffer hasn't been allocated and fails
+ */
+ if (bo) {
+ void *fbmem = drm_mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fbfd, 0);
+ struct kgsl_map_user_mem req = {
+ .memtype = KGSL_USER_MEM_TYPE_ADDR,
+ .len = size,
+ .offset = 0,
+ .hostptr = (unsigned long)fbmem,
+ };
+ struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
+ int ret;
+
+ ret = ioctl(to_kgsl_pipe(pipe)->fd, IOCTL_KGSL_MAP_USER_MEM, &req);
+ if (ret) {
+ ERROR_MSG("mapping user mem failed: %s",
+ strerror(errno));
+ goto fail;
+ }
+ kgsl_bo->gpuaddr = req.gpuaddr;
+ bo->map = fbmem;
+ }
+
+ return bo;
+fail:
+ if (bo)
+ fd_bo_del(bo);
+ return NULL;
+}
+
+drm_private uint32_t kgsl_bo_gpuaddr(struct kgsl_bo *kgsl_bo, uint32_t offset)
+{
+ struct fd_bo *bo = &kgsl_bo->base;
+ if (!kgsl_bo->gpuaddr) {
+ struct drm_kgsl_gem_bufinfo req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ ret = bo_alloc(kgsl_bo);
+ if (ret) {
+ return ret;
+ }
+
+ ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
+ &req, sizeof(req));
+ if (ret) {
+ ERROR_MSG("get bufinfo failed: %s", strerror(errno));
+ return 0;
+ }
+
+ kgsl_bo->gpuaddr = req.gpuaddr[0];
+ }
+ return kgsl_bo->gpuaddr + offset;
+}
+
+/*
+ * Super-cheezy way to synchronize between mesa and ddx.. the
+ * SET_ACTIVE ioctl gives us a way to stash a 32b # w/ a GEM bo, and
+ * GET_BUFINFO gives us a way to retrieve it. We use this to stash
+ * the timestamp of the last ISSUEIBCMDS on the buffer.
+ *
+ * To avoid an obscene amount of syscalls, we:
+ * 1) Only set the timestamp for buffers w/ a flink name, ie.
+ * only buffers shared across processes. This is enough to
+ * catch the DRI2 buffers.
+ * 2) Only set the timestamp for buffers submitted to the 3d ring
+ * and only check the timestamps on buffers submitted to the
+ * 2d ring. This should be enough to handle synchronizing the
+ * presentation blit. We could do synchronization in the other
+ * direction too, but that would be problematic if we are using
+ * the 3d ring from DDX, since the client side wouldn't know this.
+ *
+ * The waiting on timestamp happens before flush, and setting of
+ * timestamp happens after flush. It is transparent to the user
+ * of libdrm_freedreno as all the tracking of buffers happens via
+ * _emit_reloc()..
+ */
+
+drm_private void kgsl_bo_set_timestamp(struct kgsl_bo *kgsl_bo,
+ uint32_t timestamp)
+{
+ struct fd_bo *bo = &kgsl_bo->base;
+ if (bo->name) {
+ struct drm_kgsl_gem_active req = {
+ .handle = bo->handle,
+ .active = timestamp,
+ };
+ int ret;
+
+ ret = drmCommandWrite(bo->dev->fd, DRM_KGSL_GEM_SET_ACTIVE,
+ &req, sizeof(req));
+ if (ret) {
+ ERROR_MSG("set active failed: %s", strerror(errno));
+ }
+ }
+}
+
+drm_private uint32_t kgsl_bo_get_timestamp(struct kgsl_bo *kgsl_bo)
+{
+ struct fd_bo *bo = &kgsl_bo->base;
+ uint32_t timestamp = 0;
+ if (bo->name) {
+ struct drm_kgsl_gem_bufinfo req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
+ &req, sizeof(req));
+ if (ret) {
+ ERROR_MSG("get bufinfo failed: %s", strerror(errno));
+ return 0;
+ }
+
+ timestamp = req.active;
+ }
+ return timestamp;
+}
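+
+/* Illustrative flow (sketch only): synchronizing a 2d presentation blit
+ * against a 3d render on a shared (flink'd) bo:
+ *
+ *   kgsl_bo_set_timestamp(kgsl_bo, ts);     // after the 3d flush
+ *   ts = kgsl_bo_get_timestamp(kgsl_bo);    // before the 2d submit
+ *   if (ts)
+ *       fd_pipe_wait(p3d, ts);              // stall until render is done
+ */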
diff --git a/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_device.c b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_device.c
new file mode 100644
index 00000000000..175e83781fe
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_device.c
@@ -0,0 +1,65 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "kgsl_priv.h"
+
+static void kgsl_device_destroy(struct fd_device *dev)
+{
+ struct kgsl_device *kgsl_dev = to_kgsl_device(dev);
+ free(kgsl_dev);
+}
+
+static const struct fd_device_funcs funcs = {
+ .bo_new_handle = kgsl_bo_new_handle,
+ .bo_from_handle = kgsl_bo_from_handle,
+ .pipe_new = kgsl_pipe_new,
+ .destroy = kgsl_device_destroy,
+};
+
+drm_private struct fd_device * kgsl_device_new(int fd)
+{
+ struct kgsl_device *kgsl_dev;
+ struct fd_device *dev;
+
+ kgsl_dev = calloc(1, sizeof(*kgsl_dev));
+ if (!kgsl_dev)
+ return NULL;
+
+ dev = &kgsl_dev->base;
+ dev->funcs = &funcs;
+
+ return dev;
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_drm.h b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_drm.h
new file mode 100644
index 00000000000..281978ea3fc
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_drm.h
@@ -0,0 +1,192 @@
+#ifndef _KGSL_DRM_H_
+#define _KGSL_DRM_H_
+
+#include "drm.h"
+
+#define DRM_KGSL_GEM_CREATE 0x00
+#define DRM_KGSL_GEM_PREP 0x01
+#define DRM_KGSL_GEM_SETMEMTYPE 0x02
+#define DRM_KGSL_GEM_GETMEMTYPE 0x03
+#define DRM_KGSL_GEM_MMAP 0x04
+#define DRM_KGSL_GEM_ALLOC 0x05
+#define DRM_KGSL_GEM_BIND_GPU 0x06
+#define DRM_KGSL_GEM_UNBIND_GPU 0x07
+
+#define DRM_KGSL_GEM_GET_BUFINFO 0x08
+#define DRM_KGSL_GEM_SET_BUFCOUNT 0x09
+#define DRM_KGSL_GEM_SET_ACTIVE 0x0A
+#define DRM_KGSL_GEM_LOCK_HANDLE 0x0B
+#define DRM_KGSL_GEM_UNLOCK_HANDLE 0x0C
+#define DRM_KGSL_GEM_UNLOCK_ON_TS 0x0D
+#define DRM_KGSL_GEM_CREATE_FD 0x0E
+
+#define DRM_IOCTL_KGSL_GEM_CREATE \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_CREATE, struct drm_kgsl_gem_create)
+
+#define DRM_IOCTL_KGSL_GEM_PREP \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_PREP, struct drm_kgsl_gem_prep)
+
+#define DRM_IOCTL_KGSL_GEM_SETMEMTYPE \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SETMEMTYPE, \
+struct drm_kgsl_gem_memtype)
+
+#define DRM_IOCTL_KGSL_GEM_GETMEMTYPE \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_GETMEMTYPE, \
+struct drm_kgsl_gem_memtype)
+
+#define DRM_IOCTL_KGSL_GEM_MMAP \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_MMAP, struct drm_kgsl_gem_mmap)
+
+#define DRM_IOCTL_KGSL_GEM_ALLOC \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_ALLOC, struct drm_kgsl_gem_alloc)
+
+#define DRM_IOCTL_KGSL_GEM_BIND_GPU \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_BIND_GPU, struct drm_kgsl_gem_bind_gpu)
+
+#define DRM_IOCTL_KGSL_GEM_UNBIND_GPU \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_UNBIND_GPU, \
+struct drm_kgsl_gem_bind_gpu)
+
+#define DRM_IOCTL_KGSL_GEM_GET_BUFINFO \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_GET_BUFINFO, \
+ struct drm_kgsl_gem_bufinfo)
+
+#define DRM_IOCTL_KGSL_GEM_SET_BUFCOUNT \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SET_BUFCOUNT, \
+ struct drm_kgsl_gem_bufcount)
+
+#define DRM_IOCTL_KGSL_GEM_SET_ACTIVE \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SET_ACTIVE, \
+ struct drm_kgsl_gem_active)
+
+#define DRM_IOCTL_KGSL_GEM_LOCK_HANDLE \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_LOCK_HANDLE, \
+struct drm_kgsl_gem_lock_handles)
+
+#define DRM_IOCTL_KGSL_GEM_UNLOCK_HANDLE \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_UNLOCK_HANDLE, \
+struct drm_kgsl_gem_unlock_handles)
+
+#define DRM_IOCTL_KGSL_GEM_UNLOCK_ON_TS \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_UNLOCK_ON_TS, \
+struct drm_kgsl_gem_unlock_on_ts)
+
+#define DRM_IOCTL_KGSL_GEM_CREATE_FD \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_CREATE_FD, \
+struct drm_kgsl_gem_create_fd)
+
+/* Maximum number of sub buffers per GEM object */
+#define DRM_KGSL_GEM_MAX_BUFFERS 2
+
+/* Memory types - these define the source and caching policies
+ of the GEM memory chunk */
+
+/* Legacy definitions left for compatibility */
+
+#define DRM_KGSL_GEM_TYPE_EBI 0
+#define DRM_KGSL_GEM_TYPE_SMI 1
+#define DRM_KGSL_GEM_TYPE_KMEM 2
+#define DRM_KGSL_GEM_TYPE_KMEM_NOCACHE 3
+#define DRM_KGSL_GEM_TYPE_MEM_MASK 0xF
+
+/* Contiguous memory (PMEM) */
+#define DRM_KGSL_GEM_TYPE_PMEM 0x000100
+
+/* PMEM memory types */
+#define DRM_KGSL_GEM_PMEM_EBI 0x001000
+#define DRM_KGSL_GEM_PMEM_SMI 0x002000
+
+/* Standard paged memory */
+#define DRM_KGSL_GEM_TYPE_MEM 0x010000
+
+/* Caching controls */
+#define DRM_KGSL_GEM_CACHE_NONE 0x000000
+#define DRM_KGSL_GEM_CACHE_WCOMBINE 0x100000
+#define DRM_KGSL_GEM_CACHE_WTHROUGH 0x200000
+#define DRM_KGSL_GEM_CACHE_WBACK 0x400000
+#define DRM_KGSL_GEM_CACHE_WBACKWA 0x800000
+#define DRM_KGSL_GEM_CACHE_MASK 0xF00000
+
+/* FD based objects */
+#define DRM_KGSL_GEM_TYPE_FD_FBMEM 0x1000000
+#define DRM_KGSL_GEM_TYPE_FD_MASK 0xF000000
+
+/* Timestamp types */
+#define DRM_KGSL_GEM_TS_3D 0x00000430
+#define DRM_KGSL_GEM_TS_2D 0x00000180
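+
+/* Illustrative sketch (this flag combination is an assumption, not
+ * verified against the kgsl-drm shim): selecting paged memory with
+ * write-combine caching for a GEM object:
+ *
+ *   struct drm_kgsl_gem_memtype mt = {
+ *       .handle = handle,
+ *       .type = DRM_KGSL_GEM_TYPE_MEM | DRM_KGSL_GEM_CACHE_WCOMBINE,
+ *   };
+ *   ioctl(fd, DRM_IOCTL_KGSL_GEM_SETMEMTYPE, &mt);
+ */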
+
+
+struct drm_kgsl_gem_create {
+ uint32_t size;
+ uint32_t handle;
+};
+
+struct drm_kgsl_gem_prep {
+ uint32_t handle;
+ uint32_t phys;
+ uint64_t offset;
+};
+
+struct drm_kgsl_gem_memtype {
+ uint32_t handle;
+ uint32_t type;
+};
+
+struct drm_kgsl_gem_mmap {
+ uint32_t handle;
+ uint32_t size;
+ uint32_t hostptr;
+ uint64_t offset;
+};
+
+struct drm_kgsl_gem_alloc {
+ uint32_t handle;
+ uint64_t offset;
+};
+
+struct drm_kgsl_gem_bind_gpu {
+ uint32_t handle;
+ uint32_t gpuptr;
+};
+
+struct drm_kgsl_gem_bufinfo {
+ uint32_t handle;
+ uint32_t count;
+ uint32_t active;
+ uint32_t offset[DRM_KGSL_GEM_MAX_BUFFERS];
+ uint32_t gpuaddr[DRM_KGSL_GEM_MAX_BUFFERS];
+};
+
+struct drm_kgsl_gem_bufcount {
+ uint32_t handle;
+ uint32_t bufcount;
+};
+
+struct drm_kgsl_gem_active {
+ uint32_t handle;
+ uint32_t active;
+};
+
+struct drm_kgsl_gem_lock_handles {
+ uint32_t num_handles;
+ uint32_t *handle_list;
+ uint32_t pid;
+ uint32_t lock_id; /* Returned lock id used for unlocking */
+};
+
+struct drm_kgsl_gem_unlock_handles {
+ uint32_t lock_id;
+};
+
+struct drm_kgsl_gem_unlock_on_ts {
+ uint32_t lock_id;
+ uint32_t timestamp; /* This field is a hw generated ts */
+ uint32_t type; /* Which pipe to check for ts generation */
+};
+
+struct drm_kgsl_gem_create_fd {
+ uint32_t fd;
+ uint32_t handle;
+};
+
+#endif
diff --git a/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_pipe.c b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_pipe.c
new file mode 100644
index 00000000000..3546718db2e
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_pipe.c
@@ -0,0 +1,280 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "kgsl_priv.h"
+
+
+static int kgsl_pipe_get_param(struct fd_pipe *pipe,
+ enum fd_param_id param, uint64_t *value)
+{
+ struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(pipe);
+ switch (param) {
+ case FD_DEVICE_ID:
+ *value = kgsl_pipe->devinfo.device_id;
+ return 0;
+ case FD_GPU_ID:
+ *value = kgsl_pipe->devinfo.gpu_id;
+ return 0;
+ case FD_GMEM_SIZE:
+ *value = kgsl_pipe->devinfo.gmem_sizebytes;
+ return 0;
+ case FD_CHIP_ID:
+ *value = kgsl_pipe->devinfo.chip_id;
+ return 0;
+ case FD_MAX_FREQ:
+ case FD_TIMESTAMP:
+ /* unsupported on kgsl */
+ return -1;
+ default:
+ ERROR_MSG("invalid param id: %d", param);
+ return -1;
+ }
+}
+
+static int kgsl_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp,
+ uint64_t timeout)
+{
+ struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(pipe);
+ struct kgsl_device_waittimestamp req = {
+ .timestamp = timestamp,
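+ /* note: the 'timeout' arg is not honored here; a fixed 5000ms
+ * wait is used (and retried on EINTR/EAGAIN below) */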
+ .timeout = 5000,
+ };
+ int ret;
+
+ do {
+ ret = ioctl(kgsl_pipe->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP, &req);
+ } while ((ret == -1) && ((errno == EINTR) || (errno == EAGAIN)));
+ if (ret)
+ ERROR_MSG("waittimestamp failed! %d (%s)", ret, strerror(errno));
+ else
+ kgsl_pipe_process_pending(kgsl_pipe, timestamp);
+ return ret;
+}
+
+drm_private int kgsl_pipe_timestamp(struct kgsl_pipe *kgsl_pipe,
+ uint32_t *timestamp)
+{
+ struct kgsl_cmdstream_readtimestamp req = {
+ .type = KGSL_TIMESTAMP_RETIRED
+ };
+ int ret = ioctl(kgsl_pipe->fd, IOCTL_KGSL_CMDSTREAM_READTIMESTAMP, &req);
+ if (ret) {
+ ERROR_MSG("readtimestamp failed! %d (%s)",
+ ret, strerror(errno));
+ return ret;
+ }
+ *timestamp = req.timestamp;
+ return 0;
+}
+
+static void kgsl_pipe_destroy(struct fd_pipe *pipe)
+{
+ struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(pipe);
+ struct kgsl_drawctxt_destroy req = {
+ .drawctxt_id = kgsl_pipe->drawctxt_id,
+ };
+
+ if (kgsl_pipe->drawctxt_id)
+ ioctl(kgsl_pipe->fd, IOCTL_KGSL_DRAWCTXT_DESTROY, &req);
+
+ if (kgsl_pipe->fd >= 0)
+ close(kgsl_pipe->fd);
+
+ free(kgsl_pipe);
+}
+
+static const struct fd_pipe_funcs funcs = {
+ .ringbuffer_new = kgsl_ringbuffer_new,
+ .get_param = kgsl_pipe_get_param,
+ .wait = kgsl_pipe_wait,
+ .destroy = kgsl_pipe_destroy,
+};
+
+drm_private int is_kgsl_pipe(struct fd_pipe *pipe)
+{
+ return pipe->funcs == &funcs;
+}
+
+/* add buffer to submit list when it is referenced in cmdstream: */
+drm_private void kgsl_pipe_add_submit(struct kgsl_pipe *kgsl_pipe,
+ struct kgsl_bo *kgsl_bo)
+{
+ struct fd_pipe *pipe = &kgsl_pipe->base;
+ struct fd_bo *bo = &kgsl_bo->base;
+ struct list_head *list = &kgsl_bo->list[pipe->id];
+ if (LIST_IS_EMPTY(list)) {
+ fd_bo_ref(bo);
+ } else {
+ list_del(list);
+ }
+ list_addtail(list, &kgsl_pipe->submit_list);
+}
+
+/* prepare buffers on submit list before flush: */
+drm_private void kgsl_pipe_pre_submit(struct kgsl_pipe *kgsl_pipe)
+{
+ struct fd_pipe *pipe = &kgsl_pipe->base;
+ struct kgsl_bo *kgsl_bo = NULL;
+
+ if (!kgsl_pipe->p3d)
+ kgsl_pipe->p3d = fd_pipe_new(pipe->dev, FD_PIPE_3D);
+
+ LIST_FOR_EACH_ENTRY(kgsl_bo, &kgsl_pipe->submit_list, list[pipe->id]) {
+ uint32_t timestamp = kgsl_bo_get_timestamp(kgsl_bo);
+ if (timestamp)
+ fd_pipe_wait(kgsl_pipe->p3d, timestamp);
+ }
+}
+
+/* process buffers on submit list after flush: */
+drm_private void kgsl_pipe_post_submit(struct kgsl_pipe *kgsl_pipe,
+ uint32_t timestamp)
+{
+ struct fd_pipe *pipe = &kgsl_pipe->base;
+ struct kgsl_bo *kgsl_bo = NULL, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(kgsl_bo, tmp, &kgsl_pipe->submit_list, list[pipe->id]) {
+ struct list_head *list = &kgsl_bo->list[pipe->id];
+ list_del(list);
+ kgsl_bo->timestamp[pipe->id] = timestamp;
+ list_addtail(list, &kgsl_pipe->pending_list);
+
+ kgsl_bo_set_timestamp(kgsl_bo, timestamp);
+ }
+
+ if (!kgsl_pipe_timestamp(kgsl_pipe, &timestamp))
+ kgsl_pipe_process_pending(kgsl_pipe, timestamp);
+}
+
+drm_private void kgsl_pipe_process_pending(struct kgsl_pipe *kgsl_pipe,
+ uint32_t timestamp)
+{
+ struct fd_pipe *pipe = &kgsl_pipe->base;
+ struct kgsl_bo *kgsl_bo = NULL, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(kgsl_bo, tmp, &kgsl_pipe->pending_list, list[pipe->id]) {
+ struct list_head *list = &kgsl_bo->list[pipe->id];
+ if (kgsl_bo->timestamp[pipe->id] > timestamp)
+ return;
+ list_delinit(list);
+ kgsl_bo->timestamp[pipe->id] = 0;
+ fd_bo_del(&kgsl_bo->base);
+ }
+}
+
+static int getprop(int fd, enum kgsl_property_type type,
+ void *value, int sizebytes)
+{
+ struct kgsl_device_getproperty req = {
+ .type = type,
+ .value = value,
+ .sizebytes = sizebytes,
+ };
+ return ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &req);
+}
+
+#define GETPROP(fd, prop, x) do { \
+ if (getprop((fd), KGSL_PROP_##prop, &(x), sizeof(x))) { \
+ ERROR_MSG("failed to get property: " #prop); \
+ goto fail; \
+ } } while (0)
+
+
+drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev,
+ enum fd_pipe_id id)
+{
+ static const char *paths[] = {
+ [FD_PIPE_3D] = "/dev/kgsl-3d0",
+ [FD_PIPE_2D] = "/dev/kgsl-2d0",
+ };
+ struct kgsl_drawctxt_create req = {
+ .flags = 0x2000, /* ??? */
+ };
+ struct kgsl_pipe *kgsl_pipe = NULL;
+ struct fd_pipe *pipe = NULL;
+ int ret, fd;
+
+ fd = open(paths[id], O_RDWR);
+ if (fd < 0) {
+ ERROR_MSG("could not open %s device: %d (%s)",
+ paths[id], fd, strerror(errno));
+ goto fail;
+ }
+
+ ret = ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &req);
+ if (ret) {
+ ERROR_MSG("failed to allocate context: %d (%s)",
+ ret, strerror(errno));
+ goto fail;
+ }
+
+ kgsl_pipe = calloc(1, sizeof(*kgsl_pipe));
+ if (!kgsl_pipe) {
+ ERROR_MSG("allocation failed");
+ goto fail;
+ }
+
+ pipe = &kgsl_pipe->base;
+ pipe->funcs = &funcs;
+
+ kgsl_pipe->fd = fd;
+ kgsl_pipe->drawctxt_id = req.drawctxt_id;
+
+ list_inithead(&kgsl_pipe->submit_list);
+ list_inithead(&kgsl_pipe->pending_list);
+
+ GETPROP(fd, VERSION, kgsl_pipe->version);
+ GETPROP(fd, DEVICE_INFO, kgsl_pipe->devinfo);
+
+ INFO_MSG("Pipe Info:");
+ INFO_MSG(" Device: %s", paths[id]);
+ INFO_MSG(" Chip-id: %d.%d.%d.%d",
+ (kgsl_pipe->devinfo.chip_id >> 24) & 0xff,
+ (kgsl_pipe->devinfo.chip_id >> 16) & 0xff,
+ (kgsl_pipe->devinfo.chip_id >> 8) & 0xff,
+ (kgsl_pipe->devinfo.chip_id >> 0) & 0xff);
+ INFO_MSG(" Device-id: %d", kgsl_pipe->devinfo.device_id);
+ INFO_MSG(" GPU-id: %d", kgsl_pipe->devinfo.gpu_id);
+ INFO_MSG(" MMU enabled: %d", kgsl_pipe->devinfo.mmu_enabled);
+ INFO_MSG(" GMEM Base addr: 0x%08x", kgsl_pipe->devinfo.gmem_gpubaseaddr);
+ INFO_MSG(" GMEM size: 0x%08x", kgsl_pipe->devinfo.gmem_sizebytes);
+ INFO_MSG(" Driver version: %d.%d",
+ kgsl_pipe->version.drv_major, kgsl_pipe->version.drv_minor);
+ INFO_MSG(" Device version: %d.%d",
+ kgsl_pipe->version.dev_major, kgsl_pipe->version.dev_minor);
+
+ return pipe;
+fail:
+ if (pipe)
+ fd_pipe_del(pipe);
+ return NULL;
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_priv.h b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_priv.h
new file mode 100644
index 00000000000..6ab6496507e
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_priv.h
@@ -0,0 +1,120 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifndef KGSL_PRIV_H_
+#define KGSL_PRIV_H_
+
+#include "freedreno_priv.h"
+#include "msm_kgsl.h"
+#include "kgsl_drm.h"
+
+struct kgsl_device {
+ struct fd_device base;
+};
+
+static inline struct kgsl_device * to_kgsl_device(struct fd_device *x)
+{
+ return (struct kgsl_device *)x;
+}
+
+struct kgsl_pipe {
+ struct fd_pipe base;
+
+ int fd;
+ uint32_t drawctxt_id;
+
+ /* device properties: */
+ struct kgsl_version version;
+ struct kgsl_devinfo devinfo;
+
+ /* list of bo's that are referenced in ringbuffer but not
+ * submitted yet:
+ */
+ struct list_head submit_list;
+
+ /* list of bo's that have been submitted but timestamp has
+ * not passed yet (so still ref'd in active cmdstream)
+ */
+ struct list_head pending_list;
+
+ /* if we are the 2d pipe, and want to wait on a timestamp
+ * from 3d, we need to also internally open the 3d pipe:
+ */
+ struct fd_pipe *p3d;
+};
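+
+/* Illustrative bo lifecycle (descriptive sketch): emitting a reloc calls
+ * kgsl_pipe_add_submit(), which puts the bo on submit_list; after flush,
+ * kgsl_pipe_post_submit() moves it to pending_list tagged with the submit
+ * timestamp; once that timestamp retires, kgsl_pipe_process_pending()
+ * drops the reference.
+ */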
+
+static inline struct kgsl_pipe * to_kgsl_pipe(struct fd_pipe *x)
+{
+ return (struct kgsl_pipe *)x;
+}
+
+drm_private int is_kgsl_pipe(struct fd_pipe *pipe);
+
+struct kgsl_bo {
+ struct fd_bo base;
+ uint64_t offset;
+ uint32_t gpuaddr;
+ /* timestamp (per pipe) for bo's in a pipe's pending_list: */
+ uint32_t timestamp[FD_PIPE_MAX];
+ /* list-node for pipe's submit_list or pending_list */
+ struct list_head list[FD_PIPE_MAX];
+};
+
+static inline struct kgsl_bo * to_kgsl_bo(struct fd_bo *x)
+{
+ return (struct kgsl_bo *)x;
+}
+
+
+drm_private struct fd_device * kgsl_device_new(int fd);
+
+drm_private int kgsl_pipe_timestamp(struct kgsl_pipe *kgsl_pipe,
+ uint32_t *timestamp);
+drm_private void kgsl_pipe_add_submit(struct kgsl_pipe *pipe,
+ struct kgsl_bo *bo);
+drm_private void kgsl_pipe_pre_submit(struct kgsl_pipe *pipe);
+drm_private void kgsl_pipe_post_submit(struct kgsl_pipe *pipe,
+ uint32_t timestamp);
+drm_private void kgsl_pipe_process_pending(struct kgsl_pipe *pipe,
+ uint32_t timestamp);
+drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev,
+ enum fd_pipe_id id);
+
+drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
+ uint32_t size);
+
+drm_private int kgsl_bo_new_handle(struct fd_device *dev,
+ uint32_t size, uint32_t flags, uint32_t *handle);
+drm_private struct fd_bo * kgsl_bo_from_handle(struct fd_device *dev,
+ uint32_t size, uint32_t handle);
+
+drm_private uint32_t kgsl_bo_gpuaddr(struct kgsl_bo *bo, uint32_t offset);
+drm_private void kgsl_bo_set_timestamp(struct kgsl_bo *bo, uint32_t timestamp);
+drm_private uint32_t kgsl_bo_get_timestamp(struct kgsl_bo *bo);
+
+#endif /* KGSL_PRIV_H_ */
diff --git a/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_ringbuffer.c b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_ringbuffer.c
new file mode 100644
index 00000000000..7b3298abd01
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/kgsl/kgsl_ringbuffer.c
@@ -0,0 +1,231 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <assert.h>
+
+#include "freedreno_ringbuffer.h"
+#include "kgsl_priv.h"
+
+
+/* because kgsl tries to validate the gpuaddr on kernel side in ISSUEIBCMDS,
+ * we can't use normal gem bo's for ringbuffer.. someday the kernel part
+ * needs to be reworked into a single sane drm driver :-/
+ */
+struct kgsl_rb_bo {
+ struct kgsl_pipe *pipe;
+ void *hostptr;
+ uint32_t gpuaddr;
+ uint32_t size;
+};
+
+struct kgsl_ringbuffer {
+ struct fd_ringbuffer base;
+ struct kgsl_rb_bo *bo;
+};
+
+static inline struct kgsl_ringbuffer * to_kgsl_ringbuffer(struct fd_ringbuffer *x)
+{
+ return (struct kgsl_ringbuffer *)x;
+}
+
+static void kgsl_rb_bo_del(struct kgsl_rb_bo *bo)
+{
+ struct kgsl_sharedmem_free req = {
+ .gpuaddr = bo->gpuaddr,
+ };
+ int ret;
+
+ drm_munmap(bo->hostptr, bo->size);
+
+ ret = ioctl(bo->pipe->fd, IOCTL_KGSL_SHAREDMEM_FREE, &req);
+ if (ret) {
+ ERROR_MSG("sharedmem free failed: %s", strerror(errno));
+ }
+
+ free(bo);
+}
+
+static struct kgsl_rb_bo * kgsl_rb_bo_new(struct kgsl_pipe *pipe, uint32_t size)
+{
+ struct kgsl_rb_bo *bo;
+ struct kgsl_gpumem_alloc req = {
+ .size = ALIGN(size, 4096),
+ .flags = KGSL_MEMFLAGS_GPUREADONLY,
+ };
+ int ret;
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo) {
+ ERROR_MSG("allocation failed");
+ return NULL;
+ }
+ ret = ioctl(pipe->fd, IOCTL_KGSL_GPUMEM_ALLOC, &req);
+ if (ret) {
+ ERROR_MSG("gpumem allocation failed: %s", strerror(errno));
+ goto fail;
+ }
+
+ bo->pipe = pipe;
+ bo->gpuaddr = req.gpuaddr;
+ bo->size = size;
+ bo->hostptr = drm_mmap(NULL, size, PROT_WRITE|PROT_READ,
+ MAP_SHARED, pipe->fd, req.gpuaddr);
+
+ return bo;
+fail:
+ if (bo)
+ kgsl_rb_bo_del(bo);
+ return NULL;
+}
+
+static void * kgsl_ringbuffer_hostptr(struct fd_ringbuffer *ring)
+{
+ struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
+ return kgsl_ring->bo->hostptr;
+}
+
+static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start)
+{
+ struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
+ struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(ring->pipe);
+ uint32_t offset = (uint8_t *)last_start - (uint8_t *)ring->start;
+ struct kgsl_ibdesc ibdesc = {
+ .gpuaddr = kgsl_ring->bo->gpuaddr + offset,
+ .hostptr = last_start,
+ .sizedwords = ring->cur - last_start,
+ };
+ struct kgsl_ringbuffer_issueibcmds req = {
+ .drawctxt_id = kgsl_pipe->drawctxt_id,
+ .ibdesc_addr = (unsigned long)&ibdesc,
+ .numibs = 1,
+ .flags = KGSL_CONTEXT_SUBMIT_IB_LIST,
+ };
+ int ret;
+
+ kgsl_pipe_pre_submit(kgsl_pipe);
+
+ /* z180_cmdstream_issueibcmds() is made of fail: */
+ if (ring->pipe->id == FD_PIPE_2D) {
+ /* fix up size field in last cmd packet */
+ uint32_t last_size = (uint32_t)(ring->cur - last_start);
+ /* 5 is length of first packet, 2 for the two 7f000000's */
+ last_start[2] = last_size - (5 + 2);
+ ibdesc.gpuaddr = kgsl_ring->bo->gpuaddr;
+ ibdesc.hostptr = kgsl_ring->bo->hostptr;
+ ibdesc.sizedwords = 0x145;
+ req.timestamp = (uint32_t)kgsl_ring->bo->hostptr;
+ }
+
+ do {
+ ret = ioctl(kgsl_pipe->fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &req);
+ } while ((ret == -1) && ((errno == EINTR) || (errno == EAGAIN)));
+ if (ret)
+ ERROR_MSG("issueibcmds failed! %d (%s)", ret, strerror(errno));
+
+ ring->last_timestamp = req.timestamp;
+ ring->last_start = ring->cur;
+
+ kgsl_pipe_post_submit(kgsl_pipe, req.timestamp);
+
+ return ret;
+}
+
+static void kgsl_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
+ const struct fd_reloc *r)
+{
+ struct kgsl_bo *kgsl_bo = to_kgsl_bo(r->bo);
+ uint32_t addr = kgsl_bo_gpuaddr(kgsl_bo, r->offset);
+ assert(addr);
+ if (r->shift < 0)
+ addr >>= -r->shift;
+ else
+ addr <<= r->shift;
+ (*ring->cur++) = addr | r->or;
+ kgsl_pipe_add_submit(to_kgsl_pipe(ring->pipe), kgsl_bo);
+}
+
+static uint32_t kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
+ struct fd_ringbuffer *target, uint32_t cmd_idx,
+ uint32_t submit_offset, uint32_t size)
+{
+ struct kgsl_ringbuffer *target_ring = to_kgsl_ringbuffer(target);
+ assert(cmd_idx == 0);
+ (*ring->cur++) = target_ring->bo->gpuaddr + submit_offset;
+ return size;
+}
+
+static void kgsl_ringbuffer_destroy(struct fd_ringbuffer *ring)
+{
+ struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
+ if (ring->last_timestamp)
+ fd_pipe_wait(ring->pipe, ring->last_timestamp);
+ if (kgsl_ring->bo)
+ kgsl_rb_bo_del(kgsl_ring->bo);
+ free(kgsl_ring);
+}
+
+static const struct fd_ringbuffer_funcs funcs = {
+ .hostptr = kgsl_ringbuffer_hostptr,
+ .flush = kgsl_ringbuffer_flush,
+ .emit_reloc = kgsl_ringbuffer_emit_reloc,
+ .emit_reloc_ring = kgsl_ringbuffer_emit_reloc_ring,
+ .destroy = kgsl_ringbuffer_destroy,
+};
+
+drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
+ uint32_t size)
+{
+ struct kgsl_ringbuffer *kgsl_ring;
+ struct fd_ringbuffer *ring = NULL;
+
+ kgsl_ring = calloc(1, sizeof(*kgsl_ring));
+ if (!kgsl_ring) {
+ ERROR_MSG("allocation failed");
+ goto fail;
+ }
+
+ ring = &kgsl_ring->base;
+ ring->funcs = &funcs;
+ ring->size = size;
+
+ kgsl_ring->bo = kgsl_rb_bo_new(to_kgsl_pipe(pipe), size);
+ if (!kgsl_ring->bo) {
+ ERROR_MSG("ringbuffer allocation failed");
+ goto fail;
+ }
+
+ return ring;
+fail:
+ if (ring)
+ fd_ringbuffer_del(ring);
+ return NULL;
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/kgsl/msm_kgsl.h b/chromium/third_party/libdrm/src/freedreno/kgsl/msm_kgsl.h
new file mode 100644
index 00000000000..5b36eeb43f2
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/kgsl/msm_kgsl.h
@@ -0,0 +1,519 @@
+#ifndef _MSM_KGSL_H
+#define _MSM_KGSL_H
+
+#define KGSL_VERSION_MAJOR 3
+#define KGSL_VERSION_MINOR 11
+
+/*context flags */
+#define KGSL_CONTEXT_SAVE_GMEM 0x00000001
+#define KGSL_CONTEXT_NO_GMEM_ALLOC 0x00000002
+#define KGSL_CONTEXT_SUBMIT_IB_LIST 0x00000004
+#define KGSL_CONTEXT_CTX_SWITCH 0x00000008
+#define KGSL_CONTEXT_PREAMBLE 0x00000010
+#define KGSL_CONTEXT_TRASH_STATE 0x00000020
+#define KGSL_CONTEXT_PER_CONTEXT_TS 0x00000040
+
+#define KGSL_CONTEXT_INVALID 0xffffffff
+
+/* Memory allocation flags */
+#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
+
+/* generic flag values */
+#define KGSL_FLAGS_NORMALMODE 0x00000000
+#define KGSL_FLAGS_SAFEMODE 0x00000001
+#define KGSL_FLAGS_INITIALIZED0 0x00000002
+#define KGSL_FLAGS_INITIALIZED 0x00000004
+#define KGSL_FLAGS_STARTED 0x00000008
+#define KGSL_FLAGS_ACTIVE 0x00000010
+#define KGSL_FLAGS_RESERVED0 0x00000020
+#define KGSL_FLAGS_RESERVED1 0x00000040
+#define KGSL_FLAGS_RESERVED2 0x00000080
+#define KGSL_FLAGS_SOFT_RESET 0x00000100
+#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
+
+/* Clock flags to show which clocks should be controlled by a given platform */
+#define KGSL_CLK_SRC 0x00000001
+#define KGSL_CLK_CORE 0x00000002
+#define KGSL_CLK_IFACE 0x00000004
+#define KGSL_CLK_MEM 0x00000008
+#define KGSL_CLK_MEM_IFACE 0x00000010
+#define KGSL_CLK_AXI 0x00000020
+
+/*
+ * Reset status values for context
+ */
+enum kgsl_ctx_reset_stat {
+ KGSL_CTX_STAT_NO_ERROR = 0x00000000,
+ KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT = 0x00000001,
+ KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT = 0x00000002,
+ KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT = 0x00000003
+};
+
+#define KGSL_MAX_PWRLEVELS 5
+
+#define KGSL_CONVERT_TO_MBPS(val) \
+ (val*1000*1000U)
+
+/* device id */
+enum kgsl_deviceid {
+ KGSL_DEVICE_3D0 = 0x00000000,
+ KGSL_DEVICE_2D0 = 0x00000001,
+ KGSL_DEVICE_2D1 = 0x00000002,
+ KGSL_DEVICE_MAX = 0x00000003
+};
+
+enum kgsl_user_mem_type {
+ KGSL_USER_MEM_TYPE_PMEM = 0x00000000,
+ KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001,
+ KGSL_USER_MEM_TYPE_ADDR = 0x00000002,
+ KGSL_USER_MEM_TYPE_ION = 0x00000003,
+ KGSL_USER_MEM_TYPE_MAX = 0x00000004,
+};
+
+struct kgsl_devinfo {
+
+ unsigned int device_id;
+ /* chip revision id
+ * coreid:8 majorrev:8 minorrev:8 patch:8
+ */
+ unsigned int chip_id;
+ unsigned int mmu_enabled;
+ unsigned int gmem_gpubaseaddr;
+ /*
+ * This field contains the adreno revision
+ * number 200, 205, 220, etc...
+ */
+ unsigned int gpu_id;
+ unsigned int gmem_sizebytes;
+};
+
+/* this structure defines the region of memory that can be mmap()ed from this
+ driver. The timestamp fields are volatile because they are written by the
+ GPU
+*/
+struct kgsl_devmemstore {
+ volatile unsigned int soptimestamp;
+ unsigned int sbz;
+ volatile unsigned int eoptimestamp;
+ unsigned int sbz2;
+ volatile unsigned int ts_cmp_enable;
+ unsigned int sbz3;
+ volatile unsigned int ref_wait_ts;
+ unsigned int sbz4;
+ unsigned int current_context;
+ unsigned int sbz5;
+};
+
+#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
+ ((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
+ offsetof(struct kgsl_devmemstore, field))
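+
+/* Illustrative sketch (an assumption about the layout, not verified):
+ * the retired (end-of-pipeline) timestamp slot for context 'id' would
+ * be found at
+ *
+ *   KGSL_MEMSTORE_OFFSET(id, eoptimestamp)
+ *
+ * within the mmap()ed memstore region (cf. KGSL_PROP_DEVICE_SHADOW).
+ */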
+
+/* timestamp id*/
+enum kgsl_timestamp_type {
+ KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
+ KGSL_TIMESTAMP_RETIRED = 0x00000002, /* end-of-pipeline timestamp*/
+ KGSL_TIMESTAMP_QUEUED = 0x00000003,
+};
+
+/* property types - used with kgsl_device_getproperty */
+enum kgsl_property_type {
+ KGSL_PROP_DEVICE_INFO = 0x00000001,
+ KGSL_PROP_DEVICE_SHADOW = 0x00000002,
+ KGSL_PROP_DEVICE_POWER = 0x00000003,
+ KGSL_PROP_SHMEM = 0x00000004,
+ KGSL_PROP_SHMEM_APERTURES = 0x00000005,
+ KGSL_PROP_MMU_ENABLE = 0x00000006,
+ KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
+ KGSL_PROP_VERSION = 0x00000008,
+ KGSL_PROP_GPU_RESET_STAT = 0x00000009,
+ KGSL_PROP_PWRCTRL = 0x0000000E,
+};
+
+struct kgsl_shadowprop {
+ unsigned int gpuaddr;
+ unsigned int size;
+ unsigned int flags; /* contains KGSL_FLAGS_ values */
+};
+
+struct kgsl_pwrlevel {
+ unsigned int gpu_freq;
+ unsigned int bus_freq;
+ unsigned int io_fraction;
+};
+
+struct kgsl_version {
+ unsigned int drv_major;
+ unsigned int drv_minor;
+ unsigned int dev_major;
+ unsigned int dev_minor;
+};
+
+#ifdef __KERNEL__
+
+#define KGSL_3D0_REG_MEMORY "kgsl_3d0_reg_memory"
+#define KGSL_3D0_IRQ "kgsl_3d0_irq"
+#define KGSL_2D0_REG_MEMORY "kgsl_2d0_reg_memory"
+#define KGSL_2D0_IRQ "kgsl_2d0_irq"
+#define KGSL_2D1_REG_MEMORY "kgsl_2d1_reg_memory"
+#define KGSL_2D1_IRQ "kgsl_2d1_irq"
+
+enum kgsl_iommu_context_id {
+ KGSL_IOMMU_CONTEXT_USER = 0,
+ KGSL_IOMMU_CONTEXT_PRIV = 1,
+};
+
+struct kgsl_iommu_ctx {
+ const char *iommu_ctx_name;
+ enum kgsl_iommu_context_id ctx_id;
+};
+
+struct kgsl_device_iommu_data {
+ const struct kgsl_iommu_ctx *iommu_ctxs;
+ int iommu_ctx_count;
+ unsigned int physstart;
+ unsigned int physend;
+};
+
+struct kgsl_device_platform_data {
+ struct kgsl_pwrlevel pwrlevel[KGSL_MAX_PWRLEVELS];
+ int init_level;
+ int num_levels;
+ int (*set_grp_async)(void);
+ unsigned int idle_timeout;
+ bool strtstp_sleepwake;
+ unsigned int nap_allowed;
+ unsigned int clk_map;
+ unsigned int idle_needed;
+ struct msm_bus_scale_pdata *bus_scale_table;
+ struct kgsl_device_iommu_data *iommu_data;
+ int iommu_count;
+ struct msm_dcvs_core_info *core_info;
+};
+
+#endif
+
+/* structure holds list of ibs */
+struct kgsl_ibdesc {
+ unsigned int gpuaddr;
+ void *hostptr;
+ unsigned int sizedwords;
+ unsigned int ctrl;
+};
+
+/* ioctls */
+#define KGSL_IOC_TYPE 0x09
+
+/* get misc info about the GPU
+ type should be a value from enum kgsl_property_type
+ value points to a structure that varies based on type
+ sizebytes is sizeof() that structure
+ for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
+ this structure contains hardware versioning info.
+ for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
+ this is used to find mmap() offset and sizes for mapping
+ struct kgsl_memstore into userspace.
+*/
+struct kgsl_device_getproperty {
+ unsigned int type;
+ void *value;
+ unsigned int sizebytes;
+};
+
+#define IOCTL_KGSL_DEVICE_GETPROPERTY \
+ _IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
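+
+/* Illustrative sketch (error handling omitted): querying hardware info:
+ *
+ *   struct kgsl_devinfo info;
+ *   struct kgsl_device_getproperty prop = {
+ *       .type = KGSL_PROP_DEVICE_INFO,
+ *       .value = &info,
+ *       .sizebytes = sizeof(info),
+ *   };
+ *   ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop);
+ */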
+
+/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
+ */
+
+/* block until the GPU has executed past a given timestamp
+ * timeout is in milliseconds.
+ */
+struct kgsl_device_waittimestamp {
+ unsigned int timestamp;
+ unsigned int timeout;
+};
+
+#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
+ _IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)
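+
+/* Illustrative sketch (cf. kgsl_pipe_wait() in kgsl_pipe.c): blocking
+ * until a timestamp retires:
+ *
+ *   struct kgsl_device_waittimestamp wait = {
+ *       .timestamp = ts,
+ *       .timeout = 5000,   // milliseconds
+ *   };
+ *   ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP, &wait);
+ */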
+
+struct kgsl_device_waittimestamp_ctxtid {
+ unsigned int context_id;
+ unsigned int timestamp;
+ unsigned int timeout;
+};
+
+#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
+ _IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
+
+/* issue indirect commands to the GPU.
+ * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
+ * ibaddr and sizedwords must specify a subset of a buffer created
+ * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
+ * flags may be a mask of KGSL_CONTEXT_ values
+ * timestamp is a returned counter value which can be passed to
+ * other ioctls to determine when the commands have been executed by
+ * the GPU.
+ */
+struct kgsl_ringbuffer_issueibcmds {
+ unsigned int drawctxt_id;
+ unsigned int ibdesc_addr;
+ unsigned int numibs;
+ unsigned int timestamp; /*output param */
+ unsigned int flags;
+};
+
+#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
+ _IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
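+
+/* Illustrative sketch (error handling omitted; cf. kgsl_ringbuffer_flush()
+ * in kgsl_ringbuffer.c): submitting a single IB:
+ *
+ *   struct kgsl_ibdesc ib = {
+ *       .gpuaddr = ib_gpuaddr,               // hypothetical GPU address
+ *       .hostptr = ib_hostptr,
+ *       .sizedwords = ndwords,
+ *   };
+ *   struct kgsl_ringbuffer_issueibcmds req = {
+ *       .drawctxt_id = ctx_id,
+ *       .ibdesc_addr = (unsigned int)&ib,
+ *       .numibs = 1,
+ *       .flags = KGSL_CONTEXT_SUBMIT_IB_LIST,
+ *   };
+ *   ioctl(fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &req);
+ *   // on success, req.timestamp holds the submit's timestamp
+ */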
+
+/* read the most recently executed timestamp value
+ * type should be a value from enum kgsl_timestamp_type
+ */
+struct kgsl_cmdstream_readtimestamp {
+ unsigned int type;
+ unsigned int timestamp; /*output param */
+};
+
+#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
+ _IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
+
+#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
+ _IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
+
+/* free memory when the GPU reaches a given timestamp.
+ * gpuaddr specify a memory region created by a
+ * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
+ * type should be a value from enum kgsl_timestamp_type
+ */
+struct kgsl_cmdstream_freememontimestamp {
+ unsigned int gpuaddr;
+ unsigned int type;
+ unsigned int timestamp;
+};
+
+#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
+ _IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
+
+/* Previous versions of this header had incorrectly defined
+ IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
+ of a write-only ioctl. To ensure binary compatibility, the following
+ #define will be used to intercept the incorrect ioctl
+*/
+
+#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
+ _IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
+
+/* create a draw context, which is used to preserve GPU state.
+ * The flags field may contain a mask KGSL_CONTEXT_* values
+ */
+struct kgsl_drawctxt_create {
+ unsigned int flags;
+ unsigned int drawctxt_id; /*output param */
+};
+
+#define IOCTL_KGSL_DRAWCTXT_CREATE \
+ _IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
+
+/* destroy a draw context */
+struct kgsl_drawctxt_destroy {
+ unsigned int drawctxt_id;
+};
+
+#define IOCTL_KGSL_DRAWCTXT_DESTROY \
+ _IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
+
+/* add a block of pmem, fb, ashmem or user allocated address
+ * into the GPU address space */
+struct kgsl_map_user_mem {
+ int fd;
+ unsigned int gpuaddr; /*output param */
+ unsigned int len;
+ unsigned int offset;
+ unsigned int hostptr; /*input param */
+ enum kgsl_user_mem_type memtype;
+ unsigned int reserved; /* May be required to add
+ params for another mem type */
+};
+
+#define IOCTL_KGSL_MAP_USER_MEM \
+ _IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
+
+struct kgsl_cmdstream_readtimestamp_ctxtid {
+ unsigned int context_id;
+ unsigned int type;
+ unsigned int timestamp; /*output param */
+};
+
+#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
+ _IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
+
+struct kgsl_cmdstream_freememontimestamp_ctxtid {
+ unsigned int context_id;
+ unsigned int gpuaddr;
+ unsigned int type;
+ unsigned int timestamp;
+};
+
+#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
+ _IOW(KGSL_IOC_TYPE, 0x17, \
+ struct kgsl_cmdstream_freememontimestamp_ctxtid)
+
+/* add a block of pmem or fb into the GPU address space */
+struct kgsl_sharedmem_from_pmem {
+ int pmem_fd;
+ unsigned int gpuaddr; /*output param */
+ unsigned int len;
+ unsigned int offset;
+};
+
+#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
+ _IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)
+
+/* remove memory from the GPU's address space */
+struct kgsl_sharedmem_free {
+ unsigned int gpuaddr;
+};
+
+#define IOCTL_KGSL_SHAREDMEM_FREE \
+ _IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
+
+struct kgsl_cff_user_event {
+ unsigned char cff_opcode;
+ unsigned int op1;
+ unsigned int op2;
+ unsigned int op3;
+ unsigned int op4;
+ unsigned int op5;
+ unsigned int __pad[2];
+};
+
+#define IOCTL_KGSL_CFF_USER_EVENT \
+ _IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
+
+struct kgsl_gmem_desc {
+ unsigned int x;
+ unsigned int y;
+ unsigned int width;
+ unsigned int height;
+ unsigned int pitch;
+};
+
+struct kgsl_buffer_desc {
+ void *hostptr;
+ unsigned int gpuaddr;
+ int size;
+ unsigned int format;
+ unsigned int pitch;
+ unsigned int enabled;
+};
+
+struct kgsl_bind_gmem_shadow {
+ unsigned int drawctxt_id;
+ struct kgsl_gmem_desc gmem_desc;
+ unsigned int shadow_x;
+ unsigned int shadow_y;
+ struct kgsl_buffer_desc shadow_buffer;
+ unsigned int buffer_id;
+};
+
+#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
+ _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
+
+/* add a block of memory into the GPU address space */
+struct kgsl_sharedmem_from_vmalloc {
+ unsigned int gpuaddr; /*output param */
+ unsigned int hostptr;
+ unsigned int flags;
+};
+
+#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
+ _IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
+
+#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
+ _IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
+
+struct kgsl_drawctxt_set_bin_base_offset {
+ unsigned int drawctxt_id;
+ unsigned int offset;
+};
+
+#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
+ _IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
+
+enum kgsl_cmdwindow_type {
+ KGSL_CMDWINDOW_MIN = 0x00000000,
+ KGSL_CMDWINDOW_2D = 0x00000000,
+ KGSL_CMDWINDOW_3D = 0x00000001, /* legacy */
+ KGSL_CMDWINDOW_MMU = 0x00000002,
+ KGSL_CMDWINDOW_ARBITER = 0x000000FF,
+ KGSL_CMDWINDOW_MAX = 0x000000FF,
+};
+
+/* write to the command window */
+struct kgsl_cmdwindow_write {
+ enum kgsl_cmdwindow_type target;
+ unsigned int addr;
+ unsigned int data;
+};
+
+#define IOCTL_KGSL_CMDWINDOW_WRITE \
+ _IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
+
+struct kgsl_gpumem_alloc {
+ unsigned long gpuaddr;
+ size_t size;
+ unsigned int flags;
+};
+
+#define IOCTL_KGSL_GPUMEM_ALLOC \
+ _IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
+
+struct kgsl_cff_syncmem {
+ unsigned int gpuaddr;
+ unsigned int len;
+ unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_CFF_SYNCMEM \
+ _IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
+
+/*
+ * A timestamp event allows user space to register an action to be taken
+ * when a given timestamp expires.
+ */
+
+struct kgsl_timestamp_event {
+ int type; /* Type of event (see list below) */
+ unsigned int timestamp; /* Timestamp to trigger event on */
+ unsigned int context_id; /* Context for the timestamp */
+ void *priv; /* Pointer to the event specific blob */
+ size_t len; /* Size of the event specific blob */
+};
+
+#define IOCTL_KGSL_TIMESTAMP_EVENT \
+ _IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
+
+/* A genlock timestamp event releases an existing lock on timestamp expire */
+
+#define KGSL_TIMESTAMP_EVENT_GENLOCK 1
+
+struct kgsl_timestamp_event_genlock {
+ int handle; /* Handle of the genlock lock to release */
+};
+
+/*
+ * Set a property within the kernel. Uses the same structure as
+ * IOCTL_KGSL_GETPROPERTY
+ */
+
+#define IOCTL_KGSL_SETPROPERTY \
+ _IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)
+
+#ifdef __KERNEL__
+#ifdef CONFIG_MSM_KGSL_DRM
+int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
+ unsigned long *len);
+#else
+#define kgsl_gem_obj_addr(...) 0
+#endif
+#endif
+#endif /* _MSM_KGSL_H */
diff --git a/chromium/third_party/libdrm/src/freedreno/libdrm_freedreno.pc.in b/chromium/third_party/libdrm/src/freedreno/libdrm_freedreno.pc.in
new file mode 100644
index 00000000000..b736b65adcb
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/libdrm_freedreno.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_freedreno
+Description: Userspace interface to freedreno kernel DRM services
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -ldrm_freedreno
+Cflags: -I${includedir} -I${includedir}/libdrm -I${includedir}/freedreno
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/freedreno/msm/msm_bo.c b/chromium/third_party/libdrm/src/freedreno/msm/msm_bo.c
new file mode 100644
index 00000000000..72471df65b2
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/msm/msm_bo.c
@@ -0,0 +1,161 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "msm_priv.h"
+
+static int bo_allocate(struct msm_bo *msm_bo)
+{
+ struct fd_bo *bo = &msm_bo->base;
+ if (!msm_bo->offset) {
+ struct drm_msm_gem_info req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ /* if the buffer is already backed by pages then this
+ * doesn't actually do anything (other than giving us
+ * the offset)
+ */
+ ret = drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_INFO,
+ &req, sizeof(req));
+ if (ret) {
+ ERROR_MSG("alloc failed: %s", strerror(errno));
+ return ret;
+ }
+
+ msm_bo->offset = req.offset;
+ }
+
+ return 0;
+}
+
+static int msm_bo_offset(struct fd_bo *bo, uint64_t *offset)
+{
+ struct msm_bo *msm_bo = to_msm_bo(bo);
+ int ret = bo_allocate(msm_bo);
+ if (ret)
+ return ret;
+ *offset = msm_bo->offset;
+ return 0;
+}
+
+static int msm_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
+{
+ struct drm_msm_gem_cpu_prep req = {
+ .handle = bo->handle,
+ .op = op,
+ };
+
+ get_abs_timeout(&req.timeout, 5000000000);
+
+ return drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_PREP, &req, sizeof(req));
+}
+
+static void msm_bo_cpu_fini(struct fd_bo *bo)
+{
+ struct drm_msm_gem_cpu_fini req = {
+ .handle = bo->handle,
+ };
+
+ drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_FINI, &req, sizeof(req));
+}
+
+static int msm_bo_madvise(struct fd_bo *bo, int willneed)
+{
+ struct drm_msm_gem_madvise req = {
+ .handle = bo->handle,
+ .madv = willneed ? MSM_MADV_WILLNEED : MSM_MADV_DONTNEED,
+ };
+ int ret;
+
+ /* older kernels do not support this: */
+ if (bo->dev->version < FD_VERSION_MADVISE)
+ return willneed;
+
+ ret = drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ return req.retained;
+}
+
+static void msm_bo_destroy(struct fd_bo *bo)
+{
+ struct msm_bo *msm_bo = to_msm_bo(bo);
+ free(msm_bo);
+}
+
+static const struct fd_bo_funcs funcs = {
+ .offset = msm_bo_offset,
+ .cpu_prep = msm_bo_cpu_prep,
+ .cpu_fini = msm_bo_cpu_fini,
+ .madvise = msm_bo_madvise,
+ .destroy = msm_bo_destroy,
+};
+
+/* allocate a buffer handle: */
+drm_private int msm_bo_new_handle(struct fd_device *dev,
+ uint32_t size, uint32_t flags, uint32_t *handle)
+{
+ struct drm_msm_gem_new req = {
+ .size = size,
+ .flags = MSM_BO_WC, /* TODO: figure out proper flags.. */
+ };
+ int ret;
+
+ ret = drmCommandWriteRead(dev->fd, DRM_MSM_GEM_NEW,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ *handle = req.handle;
+
+ return 0;
+}
+
+/* allocate a new buffer object */
+drm_private struct fd_bo * msm_bo_from_handle(struct fd_device *dev,
+ uint32_t size, uint32_t handle)
+{
+ struct msm_bo *msm_bo;
+ struct fd_bo *bo;
+
+ msm_bo = calloc(1, sizeof(*msm_bo));
+ if (!msm_bo)
+ return NULL;
+
+ bo = &msm_bo->base;
+ bo->funcs = &funcs;
+
+ return bo;
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/msm/msm_device.c b/chromium/third_party/libdrm/src/freedreno/msm/msm_device.c
new file mode 100644
index 00000000000..727baa44326
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/msm/msm_device.c
@@ -0,0 +1,68 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "msm_priv.h"
+
+static void msm_device_destroy(struct fd_device *dev)
+{
+ struct msm_device *msm_dev = to_msm_device(dev);
+ fd_bo_cache_cleanup(&msm_dev->ring_cache, 0);
+ free(msm_dev);
+}
+
+static const struct fd_device_funcs funcs = {
+ .bo_new_handle = msm_bo_new_handle,
+ .bo_from_handle = msm_bo_from_handle,
+ .pipe_new = msm_pipe_new,
+ .destroy = msm_device_destroy,
+};
+
+drm_private struct fd_device * msm_device_new(int fd)
+{
+ struct msm_device *msm_dev;
+ struct fd_device *dev;
+
+ msm_dev = calloc(1, sizeof(*msm_dev));
+ if (!msm_dev)
+ return NULL;
+
+ dev = &msm_dev->base;
+ dev->funcs = &funcs;
+
+ fd_bo_cache_init(&msm_dev->ring_cache, TRUE);
+
+ return dev;
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/msm/msm_drm.h b/chromium/third_party/libdrm/src/freedreno/msm/msm_drm.h
new file mode 100644
index 00000000000..cbf75c3d52f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/msm/msm_drm.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MSM_DRM_H__
+#define __MSM_DRM_H__
+
+#include <stddef.h>
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints:
+ * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
+ * user/kernel compatibility
+ * 2) Keep fields aligned to their size
+ * 3) Because of how drm_ioctl() works, we can add new fields at
+ * the end of an ioctl if some care is taken: drm_ioctl() will
+ * zero out the new fields at the tail of the ioctl, so a zero
+ * value should have a backwards-compatible meaning. And for
+ * output params, old userspace simply won't see newly added
+ * output fields, so the design has to tolerate that.
+ */
+
+#define MSM_PIPE_NONE 0x00
+#define MSM_PIPE_2D0 0x01
+#define MSM_PIPE_2D1 0x02
+#define MSM_PIPE_3D0 0x10
+
+/* timeouts are specified in clock-monotonic absolute times (to simplify
+ * restarting interrupted ioctls). The following struct is logically the
+ * same as 'struct timespec' but 32/64b ABI safe.
+ */
+struct drm_msm_timespec {
+ __s64 tv_sec; /* seconds */
+ __s64 tv_nsec; /* nanoseconds */
+};
+
+#define MSM_PARAM_GPU_ID 0x01
+#define MSM_PARAM_GMEM_SIZE 0x02
+#define MSM_PARAM_CHIP_ID 0x03
+#define MSM_PARAM_MAX_FREQ 0x04
+#define MSM_PARAM_TIMESTAMP 0x05
+
+struct drm_msm_param {
+ __u32 pipe; /* in, MSM_PIPE_x */
+ __u32 param; /* in, MSM_PARAM_x */
+ __u64 value; /* out (get_param) or in (set_param) */
+};
+
+/*
+ * GEM buffers:
+ */
+
+#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
+#define MSM_BO_GPU_READONLY 0x00000002
+#define MSM_BO_CACHE_MASK 0x000f0000
+/* cache modes */
+#define MSM_BO_CACHED 0x00010000
+#define MSM_BO_WC 0x00020000
+#define MSM_BO_UNCACHED 0x00040000
+
+#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
+ MSM_BO_GPU_READONLY | \
+ MSM_BO_CACHED | \
+ MSM_BO_WC | \
+ MSM_BO_UNCACHED)
+
+struct drm_msm_gem_new {
+ __u64 size; /* in */
+ __u32 flags; /* in, mask of MSM_BO_x */
+ __u32 handle; /* out */
+};
+
+struct drm_msm_gem_info {
+ __u32 handle; /* in */
+ __u32 pad;
+ __u64 offset; /* out, offset to pass to mmap() */
+};
+
+#define MSM_PREP_READ 0x01
+#define MSM_PREP_WRITE 0x02
+#define MSM_PREP_NOSYNC 0x04
+
+#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
+
+struct drm_msm_gem_cpu_prep {
+ __u32 handle; /* in */
+ __u32 op; /* in, mask of MSM_PREP_x */
+ struct drm_msm_timespec timeout; /* in */
+};
+
+struct drm_msm_gem_cpu_fini {
+ __u32 handle; /* in */
+};
+
+/*
+ * Cmdstream Submission:
+ */
+
+/* The value written into the cmdstream is logically:
+ *
+ * ((relocbuf->gpuaddr + reloc_offset) << shift) | or
+ *
+ * When we have GPUs with >32-bit pointers, it should be possible to deal
+ * with this by emitting two reloc entries with appropriate shift
+ * values. Or a new MSM_SUBMIT_CMD_x type would also be an option.
+ *
+ * NOTE that relocs must be sorted in order of increasing submit_offset,
+ * otherwise the kernel returns EINVAL.
+ */
+struct drm_msm_gem_submit_reloc {
+ __u32 submit_offset; /* in, offset from submit_bo */
+ __u32 or; /* in, value OR'd with result */
+ __s32 shift; /* in, amount of left shift (can be negative) */
+ __u32 reloc_idx; /* in, index of reloc_bo buffer */
+ __u64 reloc_offset; /* in, offset from start of reloc_bo */
+};
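+
+/* Worked example of the formula above (illustrative, not part of the
+ * original header): with relocbuf->gpuaddr = 0x10000000,
+ * reloc_offset = 0x100, shift = 0 and or = 0, the kernel patches the
+ * dword at submit_offset to ((0x10000000 + 0x100) << 0) | 0 = 0x10000100.
+ */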
+
+/* submit-types:
+ * BUF - this cmd buffer is executed normally.
+ * IB_TARGET_BUF - this cmd buffer is an IB target. Reloc's are
+ * processed normally, but the kernel does not set up an IB to
+ * this buffer in the first-level ringbuffer
+ * CTX_RESTORE_BUF - only executed if there has been a GPU context
+ * switch since the last SUBMIT ioctl
+ */
+#define MSM_SUBMIT_CMD_BUF 0x0001
+#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
+#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
+struct drm_msm_gem_submit_cmd {
+ __u32 type; /* in, one of MSM_SUBMIT_CMD_x */
+ __u32 submit_idx; /* in, index of submit_bo cmdstream buffer */
+ __u32 submit_offset; /* in, offset into submit_bo */
+ __u32 size; /* in, cmdstream size */
+ __u32 pad;
+ __u32 nr_relocs; /* in, number of submit_reloc's */
+ __u64 __user relocs; /* in, ptr to array of submit_reloc's */
+};
+
+/* Each buffer referenced elsewhere in the cmdstream submit (i.e. the
+ * cmdstream buffer(s) themselves or reloc entries) has one (and only
+ * one) entry in the submit->bos[] table.
+ *
+ * As an optimization, the current buffer (gpu virtual address) can be
+ * passed back through the 'presumed' field. If on a subsequent reloc,
+ * userspace passes back a 'presumed' address that is still valid,
+ * then patching the cmdstream for this entry is skipped. This can
+ * save the kernel from having to map/access the cmdstream bo in the
+ * common case.
+ */
+#define MSM_SUBMIT_BO_READ 0x0001
+#define MSM_SUBMIT_BO_WRITE 0x0002
+
+#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+
+struct drm_msm_gem_submit_bo {
+ __u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
+ __u32 handle; /* in, GEM handle */
+ __u64 presumed; /* in/out, presumed buffer address */
+};
+
+/* Each cmdstream submit consists of a table of buffers involved, and
+ * one or more cmdstream buffers. This allows for conditional execution
+ * (context-restore), and IB buffers needed for per tile/bin draw cmds.
+ */
+struct drm_msm_gem_submit {
+ __u32 pipe; /* in, MSM_PIPE_x */
+ __u32 fence; /* out */
+ __u32 nr_bos; /* in, number of submit_bo's */
+ __u32 nr_cmds; /* in, number of submit_cmd's */
+ __u64 __user bos; /* in, ptr to array of submit_bo's */
+ __u64 __user cmds; /* in, ptr to array of submit_cmd's */
+};
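+
+/* Minimal sketch of a single-bo, single-cmd submit (illustrative only;
+ * fd, bo_handle and cmdstream_bytes are placeholders, error handling is
+ * omitted, and VOID2U64() is the pointer-to-u64 helper libdrm uses in
+ * freedreno/msm/msm_ringbuffer.c):
+ *
+ *   struct drm_msm_gem_submit_bo bo = {
+ *       .flags = MSM_SUBMIT_BO_READ,
+ *       .handle = bo_handle,
+ *   };
+ *   struct drm_msm_gem_submit_cmd cmd = {
+ *       .type = MSM_SUBMIT_CMD_BUF,
+ *       .submit_idx = 0,
+ *       .submit_offset = 0,
+ *       .size = cmdstream_bytes,
+ *   };
+ *   struct drm_msm_gem_submit req = {
+ *       .pipe = MSM_PIPE_3D0,
+ *       .nr_bos = 1,
+ *       .bos = VOID2U64(&bo),
+ *       .nr_cmds = 1,
+ *       .cmds = VOID2U64(&cmd),
+ *   };
+ *   drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
+ *
+ * On success req.fence holds the timestamp to pass to DRM_MSM_WAIT_FENCE.
+ */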
+
+/* The normal way to synchronize with the GPU is just to CPU_PREP on
+ * a buffer if you need to access it from the CPU (other cmdstream
+ * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
+ * handle the required synchronization under the hood). This ioctl
+ * mainly just exists as a way to implement the gallium pipe_fence
+ * APIs without requiring a dummy bo to synchronize on.
+ */
+struct drm_msm_wait_fence {
+ __u32 fence; /* in */
+ __u32 pad;
+ struct drm_msm_timespec timeout; /* in */
+};
+
+/* madvise provides a way to tell the kernel that a buffer's contents
+ * can be discarded under memory pressure, which is useful for a userspace
+ * bo cache where we want to optimistically hold on to the buffer
+ * allocation and potential mmap, but allow the pages to be discarded
+ * under memory pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when a buffer enters the
+ * BO cache, and madvise(WILLNEED) when trying to recycle a buffer from
+ * the BO cache. In the WILLNEED case, 'retained' indicates to userspace
+ * whether the backing pages still exist.
+ */
+#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
+#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
+#define __MSM_MADV_PURGED 2 /* internal state */
+
+struct drm_msm_gem_madvise {
+ __u32 handle; /* in, GEM handle */
+ __u32 madv; /* in, MSM_MADV_x */
+ __u32 retained; /* out, whether backing store still exists */
+};
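+
+/* Sketch of the WILLNEED side of the flow described above, mirroring
+ * msm_bo_madvise() in freedreno/msm/msm_bo.c (fd and handle are
+ * placeholders; error handling is omitted):
+ *
+ *   struct drm_msm_gem_madvise req = {
+ *       .handle = handle,
+ *       .madv = MSM_MADV_WILLNEED,
+ *   };
+ *   drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
+ *   if (!req.retained)
+ *       discard_cached_buffer();
+ *
+ * where discard_cached_buffer() stands in for whatever the bo cache does
+ * when the backing pages are gone.
+ */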
+
+#define DRM_MSM_GET_PARAM 0x00
+/* placeholder:
+#define DRM_MSM_SET_PARAM 0x01
+ */
+#define DRM_MSM_GEM_NEW 0x02
+#define DRM_MSM_GEM_INFO 0x03
+#define DRM_MSM_GEM_CPU_PREP 0x04
+#define DRM_MSM_GEM_CPU_FINI 0x05
+#define DRM_MSM_GEM_SUBMIT 0x06
+#define DRM_MSM_WAIT_FENCE 0x07
+#define DRM_MSM_GEM_MADVISE 0x08
+#define DRM_MSM_NUM_IOCTLS 0x09
+
+#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
+#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
+#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
+#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
+#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
+#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
+#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __MSM_DRM_H__ */
diff --git a/chromium/third_party/libdrm/src/freedreno/msm/msm_pipe.c b/chromium/third_party/libdrm/src/freedreno/msm/msm_pipe.c
new file mode 100644
index 00000000000..f872e245927
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/msm/msm_pipe.c
@@ -0,0 +1,165 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "msm_priv.h"
+
+static int query_param(struct fd_pipe *pipe, uint32_t param,
+ uint64_t *value)
+{
+ struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
+ struct drm_msm_param req = {
+ .pipe = msm_pipe->pipe,
+ .param = param,
+ };
+ int ret;
+
+ ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_GET_PARAM,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ *value = req.value;
+
+ return 0;
+}
+
+static int msm_pipe_get_param(struct fd_pipe *pipe,
+ enum fd_param_id param, uint64_t *value)
+{
+ struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
+ switch(param) {
+ case FD_DEVICE_ID: /* XXX probably get rid of this.. */
+ case FD_GPU_ID:
+ *value = msm_pipe->gpu_id;
+ return 0;
+ case FD_GMEM_SIZE:
+ *value = msm_pipe->gmem;
+ return 0;
+ case FD_CHIP_ID:
+ *value = msm_pipe->chip_id;
+ return 0;
+ case FD_MAX_FREQ:
+ return query_param(pipe, MSM_PARAM_MAX_FREQ, value);
+ case FD_TIMESTAMP:
+ return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
+ default:
+ ERROR_MSG("invalid param id: %d", param);
+ return -1;
+ }
+}
+
+static int msm_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp,
+ uint64_t timeout)
+{
+ struct fd_device *dev = pipe->dev;
+ struct drm_msm_wait_fence req = {
+ .fence = timestamp,
+ };
+ int ret;
+
+ get_abs_timeout(&req.timeout, timeout);
+
+ ret = drmCommandWrite(dev->fd, DRM_MSM_WAIT_FENCE, &req, sizeof(req));
+ if (ret) {
+ ERROR_MSG("wait-fence failed! %d (%s)", ret, strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
+static void msm_pipe_destroy(struct fd_pipe *pipe)
+{
+ struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
+ free(msm_pipe);
+}
+
+static const struct fd_pipe_funcs funcs = {
+ .ringbuffer_new = msm_ringbuffer_new,
+ .get_param = msm_pipe_get_param,
+ .wait = msm_pipe_wait,
+ .destroy = msm_pipe_destroy,
+};
+
+static uint64_t get_param(struct fd_pipe *pipe, uint32_t param)
+{
+ uint64_t value;
+ int ret = query_param(pipe, param, &value);
+ if (ret) {
+ ERROR_MSG("get-param failed! %d (%s)", ret, strerror(errno));
+ return 0;
+ }
+ return value;
+}
+
+drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
+ enum fd_pipe_id id)
+{
+ static const uint32_t pipe_id[] = {
+ [FD_PIPE_3D] = MSM_PIPE_3D0,
+ [FD_PIPE_2D] = MSM_PIPE_2D0,
+ };
+ struct msm_pipe *msm_pipe = NULL;
+ struct fd_pipe *pipe = NULL;
+
+ msm_pipe = calloc(1, sizeof(*msm_pipe));
+ if (!msm_pipe) {
+ ERROR_MSG("allocation failed");
+ goto fail;
+ }
+
+ pipe = &msm_pipe->base;
+ pipe->funcs = &funcs;
+
+ /* initialize before get_param(): */
+ pipe->dev = dev;
+ msm_pipe->pipe = pipe_id[id];
+
+ /* these params should be supported since the first version of drm/msm: */
+ msm_pipe->gpu_id = get_param(pipe, MSM_PARAM_GPU_ID);
+ msm_pipe->gmem = get_param(pipe, MSM_PARAM_GMEM_SIZE);
+ msm_pipe->chip_id = get_param(pipe, MSM_PARAM_CHIP_ID);
+
+ if (!msm_pipe->gpu_id)
+ goto fail;
+
+ INFO_MSG("Pipe Info:");
+ INFO_MSG(" GPU-id: %d", msm_pipe->gpu_id);
+ INFO_MSG(" Chip-id: 0x%08x", msm_pipe->chip_id);
+ INFO_MSG(" GMEM size: 0x%08x", msm_pipe->gmem);
+
+ return pipe;
+fail:
+ if (pipe)
+ fd_pipe_del(pipe);
+ return NULL;
+}
diff --git a/chromium/third_party/libdrm/src/freedreno/msm/msm_priv.h b/chromium/third_party/libdrm/src/freedreno/msm/msm_priv.h
new file mode 100644
index 00000000000..6d670aab254
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/msm/msm_priv.h
@@ -0,0 +1,103 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifndef MSM_PRIV_H_
+#define MSM_PRIV_H_
+
+#include "freedreno_priv.h"
+
+#ifndef __user
+# define __user
+#endif
+
+#include "msm_drm.h"
+
+struct msm_device {
+ struct fd_device base;
+ struct fd_bo_cache ring_cache;
+ unsigned ring_cnt;
+};
+
+static inline struct msm_device * to_msm_device(struct fd_device *x)
+{
+ return (struct msm_device *)x;
+}
+
+drm_private struct fd_device * msm_device_new(int fd);
+
+struct msm_pipe {
+ struct fd_pipe base;
+ uint32_t pipe;
+ uint32_t gpu_id;
+ uint32_t gmem;
+ uint32_t chip_id;
+};
+
+static inline struct msm_pipe * to_msm_pipe(struct fd_pipe *x)
+{
+ return (struct msm_pipe *)x;
+}
+
+drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
+ enum fd_pipe_id id);
+
+drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
+ uint32_t size);
+
+struct msm_bo {
+ struct fd_bo base;
+ uint64_t offset;
+ uint64_t presumed;
+ /* to avoid excess hashtable lookups, cache the ring this bo was
+ * last emitted on (since that will probably also be the next ring
+ * it is emitted on)
+ */
+ unsigned current_ring_seqno;
+ uint32_t idx;
+};
+
+static inline struct msm_bo * to_msm_bo(struct fd_bo *x)
+{
+ return (struct msm_bo *)x;
+}
+
+drm_private int msm_bo_new_handle(struct fd_device *dev,
+ uint32_t size, uint32_t flags, uint32_t *handle);
+drm_private struct fd_bo * msm_bo_from_handle(struct fd_device *dev,
+ uint32_t size, uint32_t handle);
+
+static inline void get_abs_timeout(struct drm_msm_timespec *tv, uint64_t ns)
+{
+ struct timespec t;
+ uint32_t s = ns / 1000000000;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ tv->tv_sec = t.tv_sec + s;
+ tv->tv_nsec = t.tv_nsec + ns - (s * 1000000000);
+}
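+
+/* For example, msm_bo_cpu_prep() in msm_bo.c calls
+ * get_abs_timeout(&req.timeout, 5000000000), i.e. "now + 5s" on
+ * CLOCK_MONOTONIC, matching the absolute-timeout convention documented
+ * for struct drm_msm_timespec in msm_drm.h.
+ */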
+
+#endif /* MSM_PRIV_H_ */
diff --git a/chromium/third_party/libdrm/src/freedreno/msm/msm_ringbuffer.c b/chromium/third_party/libdrm/src/freedreno/msm/msm_ringbuffer.c
new file mode 100644
index 00000000000..a78806c88bc
--- /dev/null
+++ b/chromium/third_party/libdrm/src/freedreno/msm/msm_ringbuffer.c
@@ -0,0 +1,581 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <assert.h>
+#include <inttypes.h>
+
+#include "freedreno_ringbuffer.h"
+#include "msm_priv.h"
+
+/* represents a single cmd buffer in the submit ioctl. Each cmd buffer has
+ * a backing bo, and a reloc table.
+ */
+struct msm_cmd {
+ struct list_head list;
+
+ struct fd_ringbuffer *ring;
+ struct fd_bo *ring_bo;
+
+ /* reloc's table: */
+ struct drm_msm_gem_submit_reloc *relocs;
+ uint32_t nr_relocs, max_relocs;
+
+ uint32_t size;
+};
+
+struct msm_ringbuffer {
+ struct fd_ringbuffer base;
+
+ /* submit ioctl related tables:
+ * Note that bos and cmds are tracked by the parent ringbuffer, since
+ * that is global to the submit ioctl call. The reloc's table is tracked
+ * per cmd-buffer.
+ */
+ struct {
+ /* bo's table: */
+ struct drm_msm_gem_submit_bo *bos;
+ uint32_t nr_bos, max_bos;
+
+ /* cmd's table: */
+ struct drm_msm_gem_submit_cmd *cmds;
+ uint32_t nr_cmds, max_cmds;
+ } submit;
+
+ /* should have matching entries in submit.bos: */
+ /* Note, only in parent ringbuffer */
+ struct fd_bo **bos;
+ uint32_t nr_bos, max_bos;
+
+ /* should have matching entries in submit.cmds: */
+ struct msm_cmd **cmds;
+ uint32_t nr_cmds, max_cmds;
+
+ /* List of physical cmdstream buffers (msm_cmd) associated with this
+ * logical fd_ringbuffer.
+ *
+ * Note that this is different from msm_ringbuffer::cmds (which
+ * shadows msm_ringbuffer::submit::cmds for tracking submit-ioctl
+ * related state, and is *only* tracked in the parent ringbuffer),
+ * and it only contains "completed" cmd buffers (i.e. those whose
+ * size is already known), added via get_cmd().
+ */
+ struct list_head cmd_list;
+
+ int is_growable;
+ unsigned cmd_count;
+
+ unsigned seqno;
+
+ /* maps fd_bo to idx: */
+ void *bo_table;
+};
+
+static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
+{
+ return (struct msm_ringbuffer *)x;
+}
+
+#define INIT_SIZE 0x1000
+
+static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
+drm_private extern pthread_mutex_t table_lock;
+
+static void ring_bo_del(struct fd_device *dev, struct fd_bo *bo)
+{
+ int ret;
+
+ pthread_mutex_lock(&table_lock);
+ ret = fd_bo_cache_free(&to_msm_device(dev)->ring_cache, bo);
+ pthread_mutex_unlock(&table_lock);
+
+ if (ret == 0)
+ return;
+
+ fd_bo_del(bo);
+}
+
+static struct fd_bo * ring_bo_new(struct fd_device *dev, uint32_t size)
+{
+ struct fd_bo *bo;
+
+ bo = fd_bo_cache_alloc(&to_msm_device(dev)->ring_cache, &size, 0);
+ if (bo)
+ return bo;
+
+ bo = fd_bo_new(dev, size, 0);
+ if (!bo)
+ return NULL;
+
+ /* keep ringbuffer bo's out of the normal bo cache: */
+ bo->bo_reuse = FALSE;
+
+ return bo;
+}
+
+static void ring_cmd_del(struct msm_cmd *cmd)
+{
+ if (cmd->ring_bo)
+ ring_bo_del(cmd->ring->pipe->dev, cmd->ring_bo);
+ list_del(&cmd->list);
+ to_msm_ringbuffer(cmd->ring)->cmd_count--;
+ free(cmd->relocs);
+ free(cmd);
+}
+
+static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+ struct msm_cmd *cmd = calloc(1, sizeof(*cmd));
+
+ if (!cmd)
+ return NULL;
+
+ cmd->ring = ring;
+ cmd->ring_bo = ring_bo_new(ring->pipe->dev, size);
+ if (!cmd->ring_bo)
+ goto fail;
+
+ list_addtail(&cmd->list, &msm_ring->cmd_list);
+ msm_ring->cmd_count++;
+
+ return cmd;
+
+fail:
+ ring_cmd_del(cmd);
+ return NULL;
+}
+
+static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
+{
+ if ((nr + 1) > *max) {
+ if ((*max * 2) < (nr + 1))
+ *max = nr + 5;
+ else
+ *max = *max * 2;
+ ptr = realloc(ptr, *max * sz);
+ }
+ return ptr;
+}
+
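+/* APPEND() grows the named table by one slot if needed; because the
+ * statement expression's value is nr_<name>++ (the old count), it
+ * evaluates to the index of the newly reserved slot.
+ */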
+#define APPEND(x, name) ({ \
+ (x)->name = grow((x)->name, (x)->nr_ ## name, &(x)->max_ ## name, sizeof((x)->name[0])); \
+ (x)->nr_ ## name ++; \
+})
+
+static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+ assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
+ return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
+}
+
+static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+ uint32_t idx;
+
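+ /* the submit.bos table and the bos table grow in lockstep, so both
+ * APPEND()s below return the same index:
+ */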
+ idx = APPEND(&msm_ring->submit, bos);
+ idx = APPEND(msm_ring, bos);
+
+ msm_ring->submit.bos[idx].flags = 0;
+ msm_ring->submit.bos[idx].handle = bo->handle;
+ msm_ring->submit.bos[idx].presumed = to_msm_bo(bo)->presumed;
+
+ msm_ring->bos[idx] = fd_bo_ref(bo);
+
+ return idx;
+}
+
+/* add (if needed) bo, return idx: */
+static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t flags)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+ struct msm_bo *msm_bo = to_msm_bo(bo);
+ uint32_t idx;
+ pthread_mutex_lock(&idx_lock);
+ if (msm_bo->current_ring_seqno == msm_ring->seqno) {
+ idx = msm_bo->idx;
+ } else {
+ void *val;
+
+ if (!msm_ring->bo_table)
+ msm_ring->bo_table = drmHashCreate();
+
+ if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
+ /* found */
+ idx = (uint32_t)(uintptr_t)val;
+ } else {
+ idx = append_bo(ring, bo);
+ val = (void *)(uintptr_t)idx;
+ drmHashInsert(msm_ring->bo_table, bo->handle, val);
+ }
+ msm_bo->current_ring_seqno = msm_ring->seqno;
+ msm_bo->idx = idx;
+ }
+ pthread_mutex_unlock(&idx_lock);
+ if (flags & FD_RELOC_READ)
+ msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_READ;
+ if (flags & FD_RELOC_WRITE)
+ msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_WRITE;
+ return idx;
+}
+
+static int check_cmd_bo(struct fd_ringbuffer *ring,
+ struct drm_msm_gem_submit_cmd *cmd, struct fd_bo *bo)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+ return msm_ring->submit.bos[cmd->submit_idx].handle == bo->handle;
+}
+
+/* Ensure that the submit has a corresponding entry in the cmds table for
+ * the target cmdstream buffer:
+ */
+static void get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
+ uint32_t submit_offset, uint32_t size, uint32_t type)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+ struct drm_msm_gem_submit_cmd *cmd;
+ uint32_t i;
+
+ /* figure out if we already have a cmd buf: */
+ for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
+ cmd = &msm_ring->submit.cmds[i];
+ if ((cmd->submit_offset == submit_offset) &&
+ (cmd->size == size) &&
+ (cmd->type == type) &&
+ check_cmd_bo(ring, cmd, target_cmd->ring_bo))
+ return;
+ }
+
+ /* create cmd buf if not found: */
+ i = APPEND(&msm_ring->submit, cmds);
+ APPEND(msm_ring, cmds);
+ msm_ring->cmds[i] = target_cmd;
+ cmd = &msm_ring->submit.cmds[i];
+ cmd->type = type;
+ cmd->submit_idx = bo2idx(ring, target_cmd->ring_bo, FD_RELOC_READ);
+ cmd->submit_offset = submit_offset;
+ cmd->size = size;
+ cmd->pad = 0;
+
+ target_cmd->size = size;
+}
+
+static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
+{
+ return fd_bo_map(current_cmd(ring)->ring_bo);
+}
+
+static uint32_t find_next_reloc_idx(struct msm_cmd *msm_cmd,
+ uint32_t start, uint32_t offset)
+{
+ uint32_t i;
+
+ /* a binary search would be more clever.. */
+ for (i = start; i < msm_cmd->nr_relocs; i++) {
+ struct drm_msm_gem_submit_reloc *reloc = &msm_cmd->relocs[i];
+ if (reloc->submit_offset >= offset)
+ return i;
+ }
+
+ return i;
+}
+
+static void delete_cmds(struct msm_ringbuffer *msm_ring)
+{
+ struct msm_cmd *cmd, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &msm_ring->cmd_list, list) {
+ ring_cmd_del(cmd);
+ }
+}
+
+static void flush_reset(struct fd_ringbuffer *ring)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+ unsigned i;
+
+ for (i = 0; i < msm_ring->nr_bos; i++) {
+ struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
+ msm_bo->current_ring_seqno = 0;
+ fd_bo_del(&msm_bo->base);
+ }
+
+ /* for each of the cmd buffers, clear their reloc's: */
+ for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
+ struct msm_cmd *target_cmd = msm_ring->cmds[i];
+ target_cmd->nr_relocs = 0;
+ }
+
+ msm_ring->submit.nr_cmds = 0;
+ msm_ring->submit.nr_bos = 0;
+ msm_ring->nr_cmds = 0;
+ msm_ring->nr_bos = 0;
+
+ if (msm_ring->bo_table) {
+ drmHashDestroy(msm_ring->bo_table);
+ msm_ring->bo_table = NULL;
+ }
+
+ if (msm_ring->is_growable) {
+ delete_cmds(msm_ring);
+ } else {
+ /* in old mode, just reset the # of relocs: */
+ current_cmd(ring)->nr_relocs = 0;
+ }
+}
+
+static void finalize_current_cmd(struct fd_ringbuffer *ring, uint32_t *last_start)
+{
+ uint32_t submit_offset, size, type;
+ struct fd_ringbuffer *parent;
+
+ if (ring->parent) {
+ parent = ring->parent;
+ type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
+ } else {
+ parent = ring;
+ type = MSM_SUBMIT_CMD_BUF;
+ }
+
+ submit_offset = offset_bytes(last_start, ring->start);
+ size = offset_bytes(ring->cur, last_start);
+
+ get_cmd(parent, current_cmd(ring), submit_offset, size, type);
+}
+
+static void dump_submit(struct msm_ringbuffer *msm_ring)
+{
+ uint32_t i, j;
+
+ for (i = 0; i < msm_ring->submit.nr_bos; i++) {
+ struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
+ ERROR_MSG(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
+ }
+ for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
+ struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
+ struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
+ ERROR_MSG(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
+ i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
+ for (j = 0; j < cmd->nr_relocs; j++) {
+ struct drm_msm_gem_submit_reloc *r = &relocs[j];
+ ERROR_MSG(" reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
+ ", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
+ r->reloc_idx, r->reloc_offset);
+ }
+ }
+}
+
+static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+ struct drm_msm_gem_submit req = {
+ .pipe = to_msm_pipe(ring->pipe)->pipe,
+ };
+ uint32_t i;
+ int ret;
+
+ finalize_current_cmd(ring, last_start);
+
+ /* needs to be after get_cmd() as that could create the bos/cmds tables: */
+ req.bos = VOID2U64(msm_ring->submit.bos);
+ req.nr_bos = msm_ring->submit.nr_bos;
+ req.cmds = VOID2U64(msm_ring->submit.cmds);
+ req.nr_cmds = msm_ring->submit.nr_cmds;
+
+ /* for each of the cmd's fix up their reloc's: */
+ for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
+ struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
+ struct msm_cmd *msm_cmd = msm_ring->cmds[i];
+ uint32_t a = find_next_reloc_idx(msm_cmd, 0, cmd->submit_offset);
+ uint32_t b = find_next_reloc_idx(msm_cmd, a, cmd->submit_offset + cmd->size);
+ cmd->relocs = VOID2U64(&msm_cmd->relocs[a]);
+ cmd->nr_relocs = (b > a) ? b - a : 0;
+ }
+
+ DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
+
+ ret = drmCommandWriteRead(ring->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
+ &req, sizeof(req));
+ if (ret) {
+ ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
+ dump_submit(msm_ring);
+ } else {
+ /* update timestamp on all rings associated with submit: */
+ for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
+ struct msm_cmd *msm_cmd = msm_ring->cmds[i];
+ msm_cmd->ring->last_timestamp = req.fence;
+ }
+ }
+
+ flush_reset(ring);
+
+ return ret;
+}
+
+static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
+{
+ assert(to_msm_ringbuffer(ring)->is_growable);
+ finalize_current_cmd(ring, ring->last_start);
+ ring_cmd_new(ring, size);
+}
+
+static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
+{
+ flush_reset(ring);
+}
+
+static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
+ const struct fd_reloc *r)
+{
+ struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
+ struct msm_bo *msm_bo = to_msm_bo(r->bo);
+ struct drm_msm_gem_submit_reloc *reloc;
+ struct msm_cmd *cmd = current_cmd(ring);
+ uint32_t idx = APPEND(cmd, relocs);
+ uint32_t addr;
+
+ reloc = &cmd->relocs[idx];
+
+ reloc->reloc_idx = bo2idx(parent, r->bo, r->flags);
+ reloc->reloc_offset = r->offset;
+ reloc->or = r->or;
+ reloc->shift = r->shift;
+ reloc->submit_offset = offset_bytes(ring->cur, ring->start);
+
+ addr = msm_bo->presumed;
+ if (r->shift < 0)
+ addr >>= -r->shift;
+ else
+ addr <<= r->shift;
+ (*ring->cur++) = addr | r->or;
+}
+
+static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
+ struct fd_ringbuffer *target, uint32_t cmd_idx,
+ uint32_t submit_offset, uint32_t size)
+{
+ struct msm_cmd *cmd = NULL;
+ uint32_t idx = 0;
+
+ LIST_FOR_EACH_ENTRY(cmd, &to_msm_ringbuffer(target)->cmd_list, list) {
+ if (idx == cmd_idx)
+ break;
+ idx++;
+ }
+
+ assert(cmd && (idx == cmd_idx));
+
+ if (idx < (to_msm_ringbuffer(target)->cmd_count - 1)) {
+ /* All but the last cmd buffer are fully "baked" (i.e. have already
+ * done get_cmd() to add them to the cmds table). But in this case,
+ * the size we get is invalid (since it is calculated from the
+ * last cmd buffer):
+ */
+ size = cmd->size;
+ } else {
+ get_cmd(ring, cmd, submit_offset, size, MSM_SUBMIT_CMD_IB_TARGET_BUF);
+ }
+
+ msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
+ .bo = cmd->ring_bo,
+ .flags = FD_RELOC_READ,
+ .offset = submit_offset,
+ });
+
+ return size;
+}
+
+static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
+{
+ return to_msm_ringbuffer(ring)->cmd_count;
+}
+
+static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+
+ flush_reset(ring);
+ delete_cmds(msm_ring);
+
+ free(msm_ring->submit.cmds);
+ free(msm_ring->submit.bos);
+ free(msm_ring->bos);
+ free(msm_ring->cmds);
+ free(msm_ring);
+}
+
+static const struct fd_ringbuffer_funcs funcs = {
+ .hostptr = msm_ringbuffer_hostptr,
+ .flush = msm_ringbuffer_flush,
+ .grow = msm_ringbuffer_grow,
+ .reset = msm_ringbuffer_reset,
+ .emit_reloc = msm_ringbuffer_emit_reloc,
+ .emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
+ .cmd_count = msm_ringbuffer_cmd_count,
+ .destroy = msm_ringbuffer_destroy,
+};
+
+drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
+ uint32_t size)
+{
+ struct msm_ringbuffer *msm_ring;
+ struct fd_ringbuffer *ring = NULL;
+
+ msm_ring = calloc(1, sizeof(*msm_ring));
+ if (!msm_ring) {
+ ERROR_MSG("allocation failed");
+ goto fail;
+ }
+
+ if (size == 0) {
+ assert(pipe->dev->version >= FD_VERSION_UNLIMITED_CMDS);
+ size = INIT_SIZE;
+ msm_ring->is_growable = TRUE;
+ }
+
+ list_inithead(&msm_ring->cmd_list);
+ msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;
+
+ ring = &msm_ring->base;
+ ring->funcs = &funcs;
+ ring->size = size;
+ ring->pipe = pipe; /* needed in ring_cmd_new() */
+
+ ring_cmd_new(ring, size);
+
+ return ring;
+fail:
+ if (ring)
+ fd_ringbuffer_del(ring);
+ return NULL;
+}
diff --git a/chromium/third_party/libdrm/src/include/drm/amdgpu_drm.h b/chromium/third_party/libdrm/src/include/drm/amdgpu_drm.h
new file mode 100644
index 00000000000..fbdd1185172
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/amdgpu_drm.h
@@ -0,0 +1,645 @@
+/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef __AMDGPU_DRM_H__
+#define __AMDGPU_DRM_H__
+
+#include "drm.h"
+
+#define DRM_AMDGPU_GEM_CREATE 0x00
+#define DRM_AMDGPU_GEM_MMAP 0x01
+#define DRM_AMDGPU_CTX 0x02
+#define DRM_AMDGPU_BO_LIST 0x03
+#define DRM_AMDGPU_CS 0x04
+#define DRM_AMDGPU_INFO 0x05
+#define DRM_AMDGPU_GEM_METADATA 0x06
+#define DRM_AMDGPU_GEM_WAIT_IDLE 0x07
+#define DRM_AMDGPU_GEM_VA 0x08
+#define DRM_AMDGPU_WAIT_CS 0x09
+#define DRM_AMDGPU_GEM_OP 0x10
+#define DRM_AMDGPU_GEM_USERPTR 0x11
+
+#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
+#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
+#define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
+#define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
+#define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
+#define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
+#define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
+#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
+#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
+#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
+#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
+#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+
+#define AMDGPU_GEM_DOMAIN_CPU 0x1
+#define AMDGPU_GEM_DOMAIN_GTT 0x2
+#define AMDGPU_GEM_DOMAIN_VRAM 0x4
+#define AMDGPU_GEM_DOMAIN_GDS 0x8
+#define AMDGPU_GEM_DOMAIN_GWS 0x10
+#define AMDGPU_GEM_DOMAIN_OA 0x20
+
+/* Flag that CPU access will be required for the case of VRAM domain */
+#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
+/* Flag that CPU access will not work, this VRAM domain is invisible */
+#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
+/* Flag that USWC attributes should be used for GTT */
+#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
+
+struct drm_amdgpu_gem_create_in {
+ /** the requested memory size */
+ uint64_t bo_size;
+ /** physical start_addr alignment in bytes for some HW requirements */
+ uint64_t alignment;
+ /** the requested memory domains */
+ uint64_t domains;
+ /** allocation flags */
+ uint64_t domain_flags;
+};
+
+struct drm_amdgpu_gem_create_out {
+ /** returned GEM object handle */
+ uint32_t handle;
+ uint32_t _pad;
+};
+
+union drm_amdgpu_gem_create {
+ struct drm_amdgpu_gem_create_in in;
+ struct drm_amdgpu_gem_create_out out;
+};
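+
+/* Sketch of allocating a 4 KiB buffer in GTT with this union
+ * (illustrative; fd is a placeholder and error handling is omitted):
+ *
+ *   union drm_amdgpu_gem_create args = { .in = {
+ *       .bo_size = 4096,
+ *       .alignment = 4096,
+ *       .domains = AMDGPU_GEM_DOMAIN_GTT,
+ *   } };
+ *   drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &args, sizeof(args));
+ *
+ * On success args.out.handle holds the new GEM handle.
+ */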
+
+/** Opcode to create new residency list. */
+#define AMDGPU_BO_LIST_OP_CREATE 0
+/** Opcode to destroy previously created residency list */
+#define AMDGPU_BO_LIST_OP_DESTROY 1
+/** Opcode to update resource information in the list */
+#define AMDGPU_BO_LIST_OP_UPDATE 2
+
+struct drm_amdgpu_bo_list_in {
+ /** Type of operation */
+ uint32_t operation;
+ /** Handle of list or 0 if we want to create one */
+ uint32_t list_handle;
+ /** Number of BOs in list */
+ uint32_t bo_number;
+ /** Size of each element describing BO */
+ uint32_t bo_info_size;
+ /** Pointer to array describing BOs */
+ uint64_t bo_info_ptr;
+};
+
+struct drm_amdgpu_bo_list_entry {
+ /** Handle of BO */
+ uint32_t bo_handle;
+ /** New (if specified) BO priority to be used during migration */
+ uint32_t bo_priority;
+};
+
+struct drm_amdgpu_bo_list_out {
+ /** Handle of resource list */
+ uint32_t list_handle;
+ uint32_t _pad;
+};
+
+union drm_amdgpu_bo_list {
+ struct drm_amdgpu_bo_list_in in;
+ struct drm_amdgpu_bo_list_out out;
+};
+
+/* context related */
+#define AMDGPU_CTX_OP_ALLOC_CTX 1
+#define AMDGPU_CTX_OP_FREE_CTX 2
+#define AMDGPU_CTX_OP_QUERY_STATE 3
+
+/* GPU reset status */
+#define AMDGPU_CTX_NO_RESET 0
+/* this context caused it */
+#define AMDGPU_CTX_GUILTY_RESET 1
+/* some other context caused it */
+#define AMDGPU_CTX_INNOCENT_RESET 2
+/* unknown cause */
+#define AMDGPU_CTX_UNKNOWN_RESET 3
+
+struct drm_amdgpu_ctx_in {
+ /** AMDGPU_CTX_OP_* */
+ uint32_t op;
+ /** For future use, no flags defined so far */
+ uint32_t flags;
+ uint32_t ctx_id;
+ uint32_t _pad;
+};
+
+union drm_amdgpu_ctx_out {
+ struct {
+ uint32_t ctx_id;
+ uint32_t _pad;
+ } alloc;
+
+ struct {
+ /** For future use, no flags defined so far */
+ uint64_t flags;
+ /** Number of resets caused by this context so far. */
+ uint32_t hangs;
+ /** Reset status since the last call of the ioctl. */
+ uint32_t reset_status;
+ } state;
+};
+
+union drm_amdgpu_ctx {
+ struct drm_amdgpu_ctx_in in;
+ union drm_amdgpu_ctx_out out;
+};
+
+/*
+ * This is not a reliable API and you should expect it to fail for any
+ * number of reasons, and should have a fallback path that does not use
+ * userptr to perform any operation.
+ */
+#define AMDGPU_GEM_USERPTR_READONLY (1 << 0)
+#define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1)
+#define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2)
+#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)
+
+struct drm_amdgpu_gem_userptr {
+ uint64_t addr;
+ uint64_t size;
+ /* AMDGPU_GEM_USERPTR_* */
+ uint32_t flags;
+ /* Resulting GEM handle */
+ uint32_t handle;
+};
+
+/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
+#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
+#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
+#define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4
+#define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f
+#define AMDGPU_TILING_TILE_SPLIT_SHIFT 9
+#define AMDGPU_TILING_TILE_SPLIT_MASK 0x7
+#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12
+#define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7
+#define AMDGPU_TILING_BANK_WIDTH_SHIFT 15
+#define AMDGPU_TILING_BANK_WIDTH_MASK 0x3
+#define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17
+#define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3
+#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19
+#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3
+#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
+#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
+
+#define AMDGPU_TILING_SET(field, value) \
+ (((value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
+#define AMDGPU_TILING_GET(value, field) \
+ (((value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
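+
+/* Example use of the helpers above (illustrative):
+ *
+ *   tiling_info = AMDGPU_TILING_SET(ARRAY_MODE, 4) |
+ *                 AMDGPU_TILING_SET(BANK_WIDTH, 1);
+ *   array_mode = AMDGPU_TILING_GET(tiling_info, ARRAY_MODE);
+ */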
+
+#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
+#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2
+
+/** The same structure is shared for input/output */
+struct drm_amdgpu_gem_metadata {
+ /** GEM Object handle */
+ uint32_t handle;
+ /** Whether we want to get or set metadata */
+ uint32_t op;
+ struct {
+ /** For future use, no flags defined so far */
+ uint64_t flags;
+ /** family specific tiling info */
+ uint64_t tiling_info;
+ uint32_t data_size_bytes;
+ uint32_t data[64];
+ } data;
+};
+
+struct drm_amdgpu_gem_mmap_in {
+ /** the GEM object handle */
+ uint32_t handle;
+ uint32_t _pad;
+};
+
+struct drm_amdgpu_gem_mmap_out {
+ /** mmap offset from the vma offset manager */
+ uint64_t addr_ptr;
+};
+
+union drm_amdgpu_gem_mmap {
+ struct drm_amdgpu_gem_mmap_in in;
+ struct drm_amdgpu_gem_mmap_out out;
+};
+
+struct drm_amdgpu_gem_wait_idle_in {
+ /** GEM object handle */
+ uint32_t handle;
+ /** For future use, no flags defined so far */
+ uint32_t flags;
+ /** Absolute timeout to wait */
+ uint64_t timeout;
+};
+
+struct drm_amdgpu_gem_wait_idle_out {
+ /** BO status: 0 - BO is idle, 1 - BO is busy */
+ uint32_t status;
+ /** Returned current memory domain */
+ uint32_t domain;
+};
+
+union drm_amdgpu_gem_wait_idle {
+ struct drm_amdgpu_gem_wait_idle_in in;
+ struct drm_amdgpu_gem_wait_idle_out out;
+};
+
+struct drm_amdgpu_wait_cs_in {
+ /** Command submission handle */
+ uint64_t handle;
+ /** Absolute timeout to wait */
+ uint64_t timeout;
+ uint32_t ip_type;
+ uint32_t ip_instance;
+ uint32_t ring;
+ uint32_t ctx_id;
+};
+
+struct drm_amdgpu_wait_cs_out {
+ /** CS status: 0 - CS completed, 1 - CS still busy */
+ uint64_t status;
+};
+
+union drm_amdgpu_wait_cs {
+ struct drm_amdgpu_wait_cs_in in;
+ struct drm_amdgpu_wait_cs_out out;
+};
+
+#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
+#define AMDGPU_GEM_OP_SET_PLACEMENT 1
+
+/* Sets or returns a value associated with a buffer. */
+struct drm_amdgpu_gem_op {
+ /** GEM object handle */
+ uint32_t handle;
+ /** AMDGPU_GEM_OP_* */
+ uint32_t op;
+ /** Input or return value */
+ uint64_t value;
+};
+
+#define AMDGPU_VA_OP_MAP 1
+#define AMDGPU_VA_OP_UNMAP 2
+
+/* Delay the page table update till the next CS */
+#define AMDGPU_VM_DELAY_UPDATE (1 << 0)
+
+/* Mapping flags */
+/* readable mapping */
+#define AMDGPU_VM_PAGE_READABLE (1 << 1)
+/* writable mapping */
+#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
+/* executable mapping, new for VI */
+#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
+
+struct drm_amdgpu_gem_va {
+ /** GEM object handle */
+ uint32_t handle;
+ uint32_t _pad;
+ /** AMDGPU_VA_OP_* */
+ uint32_t operation;
+ /** AMDGPU_VM_PAGE_* */
+ uint32_t flags;
+ /** VA address to assign. Must be correctly aligned. */
+ uint64_t va_address;
+ /** Specify offset inside of BO to assign. Must be correctly aligned.*/
+ uint64_t offset_in_bo;
+ /** Specify mapping size. Must be correctly aligned. */
+ uint64_t map_size;
+};
+
+#define AMDGPU_HW_IP_GFX 0
+#define AMDGPU_HW_IP_COMPUTE 1
+#define AMDGPU_HW_IP_DMA 2
+#define AMDGPU_HW_IP_UVD 3
+#define AMDGPU_HW_IP_VCE 4
+#define AMDGPU_HW_IP_NUM 5
+
+#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
+
+#define AMDGPU_CHUNK_ID_IB 0x01
+#define AMDGPU_CHUNK_ID_FENCE 0x02
+#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
+
+struct drm_amdgpu_cs_chunk {
+ uint32_t chunk_id;
+ uint32_t length_dw;
+ uint64_t chunk_data;
+};
+
+struct drm_amdgpu_cs_in {
+ /** Rendering context id */
+ uint32_t ctx_id;
+ /** Handle of resource list associated with CS */
+ uint32_t bo_list_handle;
+ uint32_t num_chunks;
+ uint32_t _pad;
+ /** this points to an array of uint64_t, each pointing to a cs chunk */
+ uint64_t chunks;
+};
+
+struct drm_amdgpu_cs_out {
+ uint64_t handle;
+};
+
+union drm_amdgpu_cs {
+ struct drm_amdgpu_cs_in in;
+ struct drm_amdgpu_cs_out out;
+};
+
+/* Specify flags to be used for IB */
+
+/* This IB should be submitted to CE */
+#define AMDGPU_IB_FLAG_CE (1<<0)
+
+/* CE Preamble */
+#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
+
+struct drm_amdgpu_cs_chunk_ib {
+ uint32_t _pad;
+ /** AMDGPU_IB_FLAG_* */
+ uint32_t flags;
+ /** Virtual address to begin IB execution */
+ uint64_t va_start;
+ /** Size of submission */
+ uint32_t ib_bytes;
+ /** HW IP to submit to */
+ uint32_t ip_type;
+ /** HW IP index of the same type to submit to */
+ uint32_t ip_instance;
+ /** Ring index to submit to */
+ uint32_t ring;
+};
+
+struct drm_amdgpu_cs_chunk_dep {
+ uint32_t ip_type;
+ uint32_t ip_instance;
+ uint32_t ring;
+ uint32_t ctx_id;
+ uint64_t handle;
+};
+
+struct drm_amdgpu_cs_chunk_fence {
+ uint32_t handle;
+ uint32_t offset;
+};
+
+struct drm_amdgpu_cs_chunk_data {
+ union {
+ struct drm_amdgpu_cs_chunk_ib ib_data;
+ struct drm_amdgpu_cs_chunk_fence fence_data;
+ };
+};
+
+/**
+ * Query h/w info: Flag that this is an integrated (a.k.a. fusion) GPU
+ */
+#define AMDGPU_IDS_FLAGS_FUSION 0x1
+
+/* indicate whether acceleration is working */
+#define AMDGPU_INFO_ACCEL_WORKING 0x00
+/* get the crtc_id from the mode object id */
+#define AMDGPU_INFO_CRTC_FROM_ID 0x01
+/* query hw IP info */
+#define AMDGPU_INFO_HW_IP_INFO 0x02
+/* query hw IP instance count for the specified type */
+#define AMDGPU_INFO_HW_IP_COUNT 0x03
+/* timestamp for GL_ARB_timer_query */
+#define AMDGPU_INFO_TIMESTAMP 0x05
+/* Query the firmware version */
+#define AMDGPU_INFO_FW_VERSION 0x0e
+ /* Subquery id: Query VCE firmware version */
+ #define AMDGPU_INFO_FW_VCE 0x1
+ /* Subquery id: Query UVD firmware version */
+ #define AMDGPU_INFO_FW_UVD 0x2
+ /* Subquery id: Query GMC firmware version */
+ #define AMDGPU_INFO_FW_GMC 0x03
+ /* Subquery id: Query GFX ME firmware version */
+ #define AMDGPU_INFO_FW_GFX_ME 0x04
+ /* Subquery id: Query GFX PFP firmware version */
+ #define AMDGPU_INFO_FW_GFX_PFP 0x05
+ /* Subquery id: Query GFX CE firmware version */
+ #define AMDGPU_INFO_FW_GFX_CE 0x06
+ /* Subquery id: Query GFX RLC firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC 0x07
+ /* Subquery id: Query GFX MEC firmware version */
+ #define AMDGPU_INFO_FW_GFX_MEC 0x08
+ /* Subquery id: Query SMC firmware version */
+ #define AMDGPU_INFO_FW_SMC 0x0a
+ /* Subquery id: Query SDMA firmware version */
+ #define AMDGPU_INFO_FW_SDMA 0x0b
+/* number of bytes moved for TTM migration */
+#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
+/* the used VRAM size */
+#define AMDGPU_INFO_VRAM_USAGE 0x10
+/* the used GTT size */
+#define AMDGPU_INFO_GTT_USAGE 0x11
+/* Information about GDS, etc. resource configuration */
+#define AMDGPU_INFO_GDS_CONFIG 0x13
+/* Query information about VRAM and GTT domains */
+#define AMDGPU_INFO_VRAM_GTT 0x14
+/* Query information about a register in MMR address space */
+#define AMDGPU_INFO_READ_MMR_REG 0x15
+/* Query information about device: rev id, family, etc. */
+#define AMDGPU_INFO_DEV_INFO 0x16
+/* visible vram usage */
+#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
+
+#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
+#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
+#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
+#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
+
+/* Input structure for the INFO ioctl */
+struct drm_amdgpu_info {
+ /* Where the return value will be stored */
+ uint64_t return_pointer;
+ /* The size of the return value. Just like "size" in "snprintf",
+ * it limits how many bytes the kernel can write. */
+ uint32_t return_size;
+ /* The query request id. */
+ uint32_t query;
+
+ union {
+ struct {
+ uint32_t id;
+ uint32_t _pad;
+ } mode_crtc;
+
+ struct {
+ /** AMDGPU_HW_IP_* */
+ uint32_t type;
+ /**
+ * Index of the IP if there are more IPs of the same
+ * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
+ */
+ uint32_t ip_instance;
+ } query_hw_ip;
+
+ struct {
+ uint32_t dword_offset;
+ /** number of registers to read */
+ uint32_t count;
+ uint32_t instance;
+ /** For future use, no flags defined so far */
+ uint32_t flags;
+ } read_mmr_reg;
+
+ struct {
+ /** AMDGPU_INFO_FW_* */
+ uint32_t fw_type;
+ /**
+ * Index of the IP if there are more IPs of
+ * the same type.
+ */
+ uint32_t ip_instance;
+ /**
+ * Index of the engine. Whether this is used depends
+ * on the firmware type. (e.g. MEC, SDMA)
+ */
+ uint32_t index;
+ uint32_t _pad;
+ } query_fw;
+ };
+};
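
/*
 * Sketch of the calling convention: the kernel writes the answer through
 * return_pointer, bounded by return_size. DRM_IOCTL_AMDGPU_INFO is assumed
 * to be defined earlier in this header; error handling is left out.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int query_vram_usage(int fd, uint64_t *vram_usage)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)vram_usage;
	request.return_size = sizeof(*vram_usage);
	request.query = AMDGPU_INFO_VRAM_USAGE;

	return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
}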
+
+struct drm_amdgpu_info_gds {
+ /** GDS GFX partition size */
+ uint32_t gds_gfx_partition_size;
+ /** GDS compute partition size */
+ uint32_t compute_partition_size;
+ /** total GDS memory size */
+ uint32_t gds_total_size;
+ /** GWS size per GFX partition */
+ uint32_t gws_per_gfx_partition;
+	/** GWS size per compute partition */
+ uint32_t gws_per_compute_partition;
+ /** OA size per GFX partition */
+ uint32_t oa_per_gfx_partition;
+ /** OA size per compute partition */
+ uint32_t oa_per_compute_partition;
+ uint32_t _pad;
+};
+
+struct drm_amdgpu_info_vram_gtt {
+ uint64_t vram_size;
+ uint64_t vram_cpu_accessible_size;
+ uint64_t gtt_size;
+};
+
+struct drm_amdgpu_info_firmware {
+ uint32_t ver;
+ uint32_t feature;
+};
+
+#define AMDGPU_VRAM_TYPE_UNKNOWN 0
+#define AMDGPU_VRAM_TYPE_GDDR1 1
+#define AMDGPU_VRAM_TYPE_DDR2 2
+#define AMDGPU_VRAM_TYPE_GDDR3 3
+#define AMDGPU_VRAM_TYPE_GDDR4 4
+#define AMDGPU_VRAM_TYPE_GDDR5 5
+#define AMDGPU_VRAM_TYPE_HBM 6
+#define AMDGPU_VRAM_TYPE_DDR3 7
+
+struct drm_amdgpu_info_device {
+ /** PCI Device ID */
+ uint32_t device_id;
+	/** Internal chip revision: A0, A1, etc. */
+ uint32_t chip_rev;
+ uint32_t external_rev;
+ /** Revision id in PCI Config space */
+ uint32_t pci_rev;
+ uint32_t family;
+ uint32_t num_shader_engines;
+ uint32_t num_shader_arrays_per_engine;
+ /* in KHz */
+ uint32_t gpu_counter_freq;
+ uint64_t max_engine_clock;
+ uint64_t max_memory_clock;
+ /* cu information */
+ uint32_t cu_active_number;
+ uint32_t cu_ao_mask;
+ uint32_t cu_bitmap[4][4];
+ /** Render backend pipe mask. One render backend is CB+DB. */
+ uint32_t enabled_rb_pipes_mask;
+ uint32_t num_rb_pipes;
+ uint32_t num_hw_gfx_contexts;
+ uint32_t _pad;
+ uint64_t ids_flags;
+ /** Starting virtual address for UMDs. */
+ uint64_t virtual_address_offset;
+ /** The maximum virtual address */
+ uint64_t virtual_address_max;
+ /** Required alignment of virtual addresses. */
+ uint32_t virtual_address_alignment;
+ /** Page table entry - fragment size */
+ uint32_t pte_fragment_size;
+ uint32_t gart_page_size;
+	/** constant engine ram size */
+	uint32_t ce_ram_size;
+	/** video memory type info */
+	uint32_t vram_type;
+	/** video memory bit width */
+ uint32_t vram_bit_width;
+ /* vce harvesting instance */
+ uint32_t vce_harvest_config;
+};
+
+struct drm_amdgpu_info_hw_ip {
+ /** Version of h/w IP */
+ uint32_t hw_ip_version_major;
+ uint32_t hw_ip_version_minor;
+ /** Capabilities */
+ uint64_t capabilities_flags;
+	/** command buffer address start alignment */
+	uint32_t ib_start_alignment;
+	/** command buffer size alignment */
+ uint32_t ib_size_alignment;
+ /** Bitmask of available rings. Bit 0 means ring 0, etc. */
+ uint32_t available_rings;
+ uint32_t _pad;
+};
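
/*
 * Sketch: available_rings is a plain bitmask, so ring N of this IP is
 * usable exactly when bit N is set.
 */
static int ring_is_available(const struct drm_amdgpu_info_hw_ip *info,
			     uint32_t ring)
{
	return (info->available_rings >> ring) & 1;
}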
+
+/*
+ * Supported GPU families
+ */
+#define AMDGPU_FAMILY_UNKNOWN 0
+#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
+#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
+#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
+#define AMDGPU_FAMILY_CZ 135 /* Carrizo */
+
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/drm.h b/chromium/third_party/libdrm/src/include/drm/drm.h
new file mode 100644
index 00000000000..a7b1a636938
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/drm.h
@@ -0,0 +1,882 @@
+/**
+ * \file drm.h
+ * Header for the Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#if defined(__linux__)
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
+
+#else /* One of the BSDs */
+
+#include <sys/ioccom.h>
+#include <sys/types.h>
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+typedef size_t __kernel_size_t;
+typedef unsigned long drm_handle_t;
+
+#endif
+
+#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
+
+typedef unsigned int drm_context_t;
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t;
+
+/**
+ * Cliprect.
+ *
+ * \warning: If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well
+ *
+ * \note KW: Actually it's illegal to change either for
+ * backwards-compatibility reasons.
+ */
+struct drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+};
+
+/**
+ * Drawable information.
+ */
+struct drm_drawable_info {
+ unsigned int num_rects;
+ struct drm_clip_rect *rects;
+};
+
+/**
+ * Texture region.
+ */
+struct drm_tex_region {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding;
+ unsigned int age;
+};
+
+/**
+ * Hardware lock.
+ *
+ * The lock structure is a simple cache-line aligned integer. To avoid
+ * processor bus contention on a multiprocessor system, there should not be any
+ * other data stored in the same cache line.
+ */
+struct drm_hw_lock {
+ __volatile__ unsigned int lock; /**< lock variable */
+ char padding[60]; /**< Pad to cache line */
+};
+
+/**
+ * DRM_IOCTL_VERSION ioctl argument type.
+ *
+ * \sa drmGetVersion().
+ */
+struct drm_version {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ __kernel_size_t name_len; /**< Length of name buffer */
+ char *name; /**< Name of driver */
+ __kernel_size_t date_len; /**< Length of date buffer */
+ char *date; /**< User-space buffer to hold date */
+ __kernel_size_t desc_len; /**< Length of desc buffer */
+ char *desc; /**< User-space buffer to hold desc */
+};
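
/*
 * Sketch of the two-pass pattern this struct implies: one call with NULL
 * buffers to learn the lengths, then a second call with buffers allocated.
 * DRM_IOCTL_VERSION appears further down in this header; most error
 * handling is elided.
 */
#include <stdlib.h>
#include <sys/ioctl.h>

static int get_driver_name(int fd, char **name_out)
{
	struct drm_version v = {0};

	if (ioctl(fd, DRM_IOCTL_VERSION, &v))
		return -1;
	*name_out = calloc(1, v.name_len + 1);	/* +1 for terminator */
	if (!*name_out)
		return -1;
	v.name = *name_out;
	return ioctl(fd, DRM_IOCTL_VERSION, &v);
}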
+
+/**
+ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
+ *
+ * \sa drmGetBusid() and drmSetBusId().
+ */
+struct drm_unique {
+ __kernel_size_t unique_len; /**< Length of unique */
+ char *unique; /**< Unique name for driver instantiation */
+};
+
+struct drm_list {
+ int count; /**< Length of user-space structures */
+ struct drm_version *version;
+};
+
+struct drm_block {
+ int unused;
+};
+
+/**
+ * DRM_IOCTL_CONTROL ioctl argument type.
+ *
+ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
+ */
+struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+};
+
+/**
+ * Type of memory to map.
+ */
+enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /**< no caching, no core dump */
+ _DRM_SHM = 2, /**< shared, cached */
+ _DRM_AGP = 3, /**< AGP/GART */
+ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
+ _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
+};
+
+/**
+ * Memory mapping flags.
+ */
+enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /**< shared, cached, locked */
+ _DRM_KERNEL = 0x08, /**< kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
+};
+
+struct drm_ctx_priv_map {
+ unsigned int ctx_id; /**< Context requesting private mapping */
+ void *handle; /**< Handle of map */
+};
+
+/**
+ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
+ * argument type.
+ *
+ * \sa drmAddMap().
+ */
+struct drm_map {
+ unsigned long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ void *handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+ /* Private data */
+};
+
+/**
+ * DRM_IOCTL_GET_CLIENT ioctl argument type.
+ */
+struct drm_client {
+ int idx; /**< Which client desired? */
+ int auth; /**< Is client authenticated? */
+ unsigned long pid; /**< Process ID */
+ unsigned long uid; /**< User ID */
+ unsigned long magic; /**< Magic */
+ unsigned long iocs; /**< Ioctl count */
+};
+
+enum drm_stat_type {
+ _DRM_STAT_LOCK,
+ _DRM_STAT_OPENS,
+ _DRM_STAT_CLOSES,
+ _DRM_STAT_IOCTLS,
+ _DRM_STAT_LOCKS,
+ _DRM_STAT_UNLOCKS,
+ _DRM_STAT_VALUE, /**< Generic value */
+ _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
+ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
+
+ _DRM_STAT_IRQ, /**< IRQ */
+ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
+ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
+ _DRM_STAT_DMA, /**< DMA */
+ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
+ _DRM_STAT_MISSED /**< Missed DMA opportunity */
+ /* Add to the *END* of the list */
+};
+
+/**
+ * DRM_IOCTL_GET_STATS ioctl argument type.
+ */
+struct drm_stats {
+ unsigned long count;
+ struct {
+ unsigned long value;
+ enum drm_stat_type type;
+ } data[15];
+};
+
+/**
+ * Hardware locking flags.
+ */
+enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+};
+
+/**
+ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
+ *
+ * \sa drmGetLock() and drmUnlock().
+ */
+struct drm_lock {
+ int context;
+ enum drm_lock_flags flags;
+};
+
+/**
+ * DMA flags
+ *
+ * \warning
+ * These values \e must match xf86drm.h.
+ *
+ * \sa drm_dma.
+ */
+enum drm_dma_flags {
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note The buffer may not yet have
+ * been processed by the hardware --
+ * getting a hardware lock with the
+ * hardware quiescent will ensure
+ * that the buffer has been
+ * processed.
+ */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+};
+
+/**
+ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
+ *
+ * \sa drmAddBufs().
+ */
+struct drm_buf_desc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ enum {
+ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
+ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
+ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
+ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
+ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
+ } flags;
+ unsigned long agp_start; /**<
+ * Start address of where the AGP buffers are
+ * in the AGP aperture
+ */
+};
+
+/**
+ * DRM_IOCTL_INFO_BUFS ioctl argument type.
+ */
+struct drm_buf_info {
+ int count; /**< Entries in list */
+ struct drm_buf_desc *list;
+};
+
+/**
+ * DRM_IOCTL_FREE_BUFS ioctl argument type.
+ */
+struct drm_buf_free {
+ int count;
+ int *list;
+};
+
+/**
+ * Buffer information
+ *
+ * \sa drm_buf_map.
+ */
+struct drm_buf_pub {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ void *address; /**< Address of buffer */
+};
+
+/**
+ * DRM_IOCTL_MAP_BUFS ioctl argument type.
+ */
+struct drm_buf_map {
+ int count; /**< Length of the buffer list */
+#ifdef __cplusplus
+ void *virt;
+#else
+ void *virtual; /**< Mmap'd area in user-virtual */
+#endif
+ struct drm_buf_pub *list; /**< Buffer information */
+};
+
+/**
+ * DRM_IOCTL_DMA ioctl argument type.
+ *
+ * Indices here refer to the offset into the buffer list in drm_buf_get.
+ *
+ * \sa drmDMA().
+ */
+struct drm_dma {
+ int context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int *send_indices; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send */
+ enum drm_dma_flags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size for buffers */
+ int *request_indices; /**< Buffer information */
+ int *request_sizes;
+ int granted_count; /**< Number of buffers granted */
+};
+
+enum drm_ctx_flags {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+};
+
+/**
+ * DRM_IOCTL_ADD_CTX ioctl argument type.
+ *
+ * \sa drmCreateContext() and drmDestroyContext().
+ */
+struct drm_ctx {
+ drm_context_t handle;
+ enum drm_ctx_flags flags;
+};
+
+/**
+ * DRM_IOCTL_RES_CTX ioctl argument type.
+ */
+struct drm_ctx_res {
+ int count;
+ struct drm_ctx *contexts;
+};
+
+/**
+ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
+ */
+struct drm_draw {
+ drm_drawable_t handle;
+};
+
+/**
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+ DRM_DRAWABLE_CLIPRECTS
+} drm_drawable_info_type_t;
+
+struct drm_update_draw {
+ drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ unsigned long long data;
+};
+
+/**
+ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
+ */
+struct drm_auth {
+ drm_magic_t magic;
+};
+
+/**
+ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
+ *
+ * \sa drmGetInterruptFromBusID().
+ */
+struct drm_irq_busid {
+ int irq; /**< IRQ number */
+ int busnum; /**< bus number */
+ int devnum; /**< device number */
+ int funcnum; /**< function number */
+};
+
+enum drm_vblank_seq_type {
+ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ /* bits 1-6 are reserved for high crtcs */
+ _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
+ _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
+};
+#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
+
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
+
+struct drm_wait_vblank_request {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+};
+
+/**
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
+};
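
/*
 * Sketch: a blocking wait for the next vblank on the first CRTC using the
 * request/reply union above. DRM_IOCTL_WAIT_VBLANK appears further down in
 * this header; error handling is elided.
 */
#include <sys/ioctl.h>

static int wait_next_vblank(int fd)
{
	union drm_wait_vblank vbl = {0};

	vbl.request.type = _DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 1;	/* one vblank from now */
	/* on success, vbl.reply carries the sequence and timestamp */
	return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
}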
+
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+ __u32 crtc;
+ __u32 cmd;
+};
+
+/**
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+ * \sa drmAgpEnable().
+ */
+struct drm_agp_mode {
+ unsigned long mode; /**< AGP mode */
+};
+
+/**
+ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
+ *
+ * \sa drmAgpAlloc() and drmAgpFree().
+ */
+struct drm_agp_buffer {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for binding / unbinding */
+ unsigned long type; /**< Type of memory to allocate */
+ unsigned long physical; /**< Physical used by i810 */
+};
+
+/**
+ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
+ *
+ * \sa drmAgpBind() and drmAgpUnbind().
+ */
+struct drm_agp_binding {
+ unsigned long handle; /**< From drm_agp_buffer */
+ unsigned long offset; /**< In bytes -- will round to page boundary */
+};
+
+/**
+ * DRM_IOCTL_AGP_INFO ioctl argument type.
+ *
+ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
+ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
+ * drmAgpVendorId() and drmAgpDeviceId().
+ */
+struct drm_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /* physical address */
+ unsigned long aperture_size; /* bytes */
+ unsigned long memory_allowed; /* bytes */
+ unsigned long memory_used;
+
+ /* PCI information */
+ unsigned short id_vendor;
+ unsigned short id_device;
+};
+
+/**
+ * DRM_IOCTL_SG_ALLOC ioctl argument type.
+ */
+struct drm_scatter_gather {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for mapping / unmapping */
+};
+
+/**
+ * DRM_IOCTL_SET_VERSION ioctl argument type.
+ */
+struct drm_set_version {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+};
+
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ __u32 handle;
+ __u32 pad;
+};
+
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ __u32 handle;
+
+ /** Returned global name */
+ __u32 name;
+};
+
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+struct drm_gem_open {
+ /** Name of object being opened */
+ __u32 name;
+
+ /** Returned handle for the object */
+ __u32 handle;
+
+ /** Returned size of the object */
+ __u64 size;
+};
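
/*
 * Sketch of the flink/open pair: flink turns a process-local GEM handle
 * into a global name that any authenticated client can then open. The
 * ioctl macros appear further down in this header; the PRIME fds below are
 * the preferred sharing mechanism.
 */
#include <sys/ioctl.h>

static int gem_name_from_handle(int fd, __u32 handle, __u32 *name_out)
{
	struct drm_gem_flink flink = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink))
		return -1;
	*name_out = flink.name;
	return 0;
}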
+
+#define DRM_CAP_DUMB_BUFFER 0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+#define DRM_CAP_PRIME 0x5
+#define DRM_PRIME_CAP_IMPORT 0x1
+#define DRM_PRIME_CAP_EXPORT 0x2
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
+/*
+ * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is
+ * that a hardware-agnostic userspace can query a cursor plane size to use.
+ *
+ * Note that the cross-driver contract is to merely return a valid size;
+ * drivers are free to attach another meaning on top, e.g. i915 returns the
+ * maximum plane size.
+ */
+#define DRM_CAP_CURSOR_WIDTH 0x8
+#define DRM_CAP_CURSOR_HEIGHT 0x9
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+
+/** DRM_IOCTL_GET_CAP ioctl argument type */
+struct drm_get_cap {
+ __u64 capability;
+ __u64 value;
+};
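
/*
 * Sketch: querying a capability. The kernel fills in value;
 * DRM_IOCTL_GET_CAP appears further down in this header.
 */
#include <sys/ioctl.h>

static int supports_dumb_buffers(int fd)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };

	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap))
		return 0;	/* treat failure as "not supported" */
	return cap.value != 0;
}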
+
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D 1
+
+/**
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES
+ *
+ * If set to 1, the DRM core will expose all planes (overlay, primary, and
+ * cursor) to userspace.
+ */
+#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
+
+/**
+ * DRM_CLIENT_CAP_ATOMIC
+ *
+ * If set to 1, the DRM core will expose atomic properties to userspace
+ */
+#define DRM_CLIENT_CAP_ATOMIC 3
+
+/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+ __u64 capability;
+ __u64 value;
+};
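
/*
 * Sketch: opting in to a client capability, here universal planes.
 * DRM_IOCTL_SET_CLIENT_CAP appears further down in this header.
 */
#include <sys/ioctl.h>

static int enable_universal_planes(int fd)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_UNIVERSAL_PLANES,
		.value = 1,
	};

	return ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}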
+
+#define DRM_RDWR O_RDWR
+#define DRM_CLOEXEC O_CLOEXEC
+struct drm_prime_handle {
+ __u32 handle;
+
+ /** Flags.. only applicable for handle->fd */
+ __u32 flags;
+
+ /** Returned dmabuf file descriptor */
+ __s32 fd;
+};
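
/*
 * Sketch: exporting a GEM handle as a dmabuf file descriptor via PRIME.
 * DRM_IOCTL_PRIME_HANDLE_TO_FD appears further down in this header; error
 * handling is elided.
 */
#include <sys/ioctl.h>

static int dmabuf_from_handle(int fd, __u32 handle, int *dmabuf_out)
{
	struct drm_prime_handle args = {0};

	args.handle = handle;
	args.flags = DRM_CLOEXEC;	/* see the flags comment above */
	if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;
	*dmabuf_out = args.fd;
	return 0;
}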
+
+#include "drm_mode.h"
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
+#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
+#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+
+#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
+#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
+
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+
+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
+#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
+#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
+#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
+#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
+
+#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
+#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
+#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
+#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
+#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_GETPLANE2 DRM_IOWR(0xB6, struct drm_mode_get_plane2)
+#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
+#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
+#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
+#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
+#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
+#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
+
+/**
+ * Device specific ioctls should only be in their respective headers.
+ * The device specific ioctl range is from 0x40 to 0x9f.
+ * Generic ioctls restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
+ * drmCommandReadWrite().
+ */
+#define DRM_COMMAND_BASE 0x40
+#define DRM_COMMAND_END 0xA0
+
+/**
+ * Header for events written back to userspace on the drm fd. The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event. A read on the drm fd will always only return complete
+ * events, that is, if for example the read buffer is 100 bytes, and
+ * there are two 64 byte events pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+ __u32 type;
+ __u32 length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+ __u32 sequence;
+ __u32 reserved;
+};
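
/*
 * Sketch of draining the event stream described above: reads return only
 * whole events, and each event's length field (header included) advances
 * the cursor to the next one.
 */
#include <stdio.h>
#include <unistd.h>

static void drain_drm_events(int fd)
{
	char buf[1024];
	ssize_t len = read(fd, buf, sizeof(buf));
	ssize_t i = 0;

	while (i < len) {
		struct drm_event *e = (struct drm_event *)&buf[i];

		if (e->type == DRM_EVENT_VBLANK) {
			struct drm_event_vblank *vb = (void *)e;

			printf("vblank seq %u\n", vb->sequence);
		}
		i += e->length;	/* length covers header + payload */
	}
}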
+
+/* typedef area */
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/drm_fourcc.h b/chromium/third_party/libdrm/src/include/drm/drm_fourcc.h
new file mode 100644
index 00000000000..d26d01b523f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/drm_fourcc.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_FOURCC_H
+#define DRM_FOURCC_H
+
+#include "drm.h"
+
+#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
+ ((__u32)(c) << 16) | ((__u32)(d) << 24))
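
/*
 * Worked example: fourcc_code('X', 'R', '2', '4') packs to 0x34325258,
 * since 'X' = 0x58 lands in the low byte and each later character takes
 * the next higher byte. The helper below is the inverse.
 */
static void fourcc_to_string(__u32 fourcc, char out[5])
{
	out[0] = fourcc & 0xff;
	out[1] = (fourcc >> 8) & 0xff;
	out[2] = (fourcc >> 16) & 0xff;
	out[3] = (fourcc >> 24) & 0xff;
	out[4] = '\0';
}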
+
+#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+
+/* color index */
+#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp Red */
+#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+
+/* 16 bpp RG */
+#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
+#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+
+/* 8 bpp RGB */
+#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* packed YCbCr */
+#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
+
+#define DRM_FORMAT_MT21 fourcc_code('M', 'T', '2', '1') /* Mediatek Block Mode */
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB. This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below. The lower 56 bits are assigned as vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define DRM_FORMAT_MOD_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
+#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
+#define DRM_FORMAT_MOD_VENDOR_NV 0x03
+#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
+/* add more to the end as needed */
+
+/* Vendor ID for downstream, interim ChromeOS specific modifiers. */
+#define DRM_FORMAT_MOD_VENDOR_CHROMEOS 0xf0
+
+#define fourcc_mod_code(vendor, val) \
+ ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
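
/*
 * Sketch: splitting a modifier back into its two halves, the vendor id in
 * the upper 8 bits and the vendor-defined value in the lower 56.
 */
static __u8 modifier_vendor(__u64 modifier)
{
	return modifier >> 56;
}

static __u64 modifier_value(__u64 modifier)
{
	return modifier & 0x00ffffffffffffffULL;
}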
+
+/*
+ * Format Modifier tokens:
+ *
+ * When adding a new token please document the layout with a code comment,
+ * similar to the fourcc codes above. drm_fourcc.h is considered the
+ * authoritative source for all of these.
+ */
+
+/* Intel framebuffer modifiers */
+
+/*
+ * Intel X-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2, where the tiles
+ * are 2Kb) in row-major layout. Within the tile, bytes are laid out
+ * row-major, with a platform-dependent stride. On top of that, the memory
+ * can apply platform-dependent swizzling of some higher address bits into
+ * bit6.
+ *
+ * This format is highly platform-specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
+
+/*
+ * Intel Y-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2, where the tiles
+ * are 2Kb) in row-major layout. Within the tile, bytes are laid out in
+ * OWORD (16 byte) chunks column-major, with a platform-dependent height.
+ * On top of that, the memory can apply platform-dependent swizzling of
+ * some higher address bits into bit6.
+ *
+ * This format is highly platform-specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
+
+/*
+ * Intel Yf-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles in row-major layout.
+ * Within the tile, pixels are laid out in sixteen 256 byte units /
+ * sub-tiles, which are arranged in four groups (two wide, two high) with
+ * column-major layout. Each group therefore consists of four 256 byte
+ * units, which are also laid out as 2x2 column-major.
+ * 256 byte units are made out of four 64 byte blocks of pixels, producing
+ * either a square block or a 2:1 unit.
+ * 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the
+ * width in pixels depends on the pixel depth.
+ */
+#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
+
+/*
+ * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
+ *
+ * Macroblocks are laid out in a Z-shape, and the pixel data within each
+ * macroblock follows the standard NV12 layout.
+ * As for NV12, an image is the result of two frame buffers: one for Y,
+ * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
+ * Alignment requirements are (for each buffer):
+ * - multiple of 128 pixels for the width
+ * - multiple of 32 pixels for the height
+ *
+ * For more information: see https://linuxtv.org/downloads/v4l-dvb-apis/re32.html
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+
+/*
+ * Rockchip ARM Framebuffer Compression (AFBC)
+ *
+ * This modifier identifies the specific variant of AFBC supported by the
+ * Rockchip display hardware. It's technically a two-plane format: first a
+ * header with 16 bytes per block, followed by the block data aligned to 1024
+ * bytes. Each block is 16x16 pixels.
+ *
+ * Eventually ARM should define modifiers for the various AFBC types, but
+ * we'll use this in the meantime. We use the CHROMEOS vendor ID to make sure
+ * we don't clash with future vendor modifiers.
+ */
+#define DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC fourcc_mod_code(CHROMEOS, 1)
+
+#endif /* DRM_FOURCC_H */
diff --git a/chromium/third_party/libdrm/src/include/drm/drm_mode.h b/chromium/third_party/libdrm/src/include/drm/drm_mode.h
new file mode 100644
index 00000000000..6ea17ebe013
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/drm_mode.h
@@ -0,0 +1,651 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2007-2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_MODE_H
+#define _DRM_MODE_H
+
+#include "drm.h"
+
+#define DRM_DISPLAY_INFO_LEN 32
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN (1<<0)
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_PREFERRED (1<<3)
+#define DRM_MODE_TYPE_DEFAULT (1<<4)
+#define DRM_MODE_TYPE_USERDEF (1<<5)
+#define DRM_MODE_TYPE_DRIVER (1<<6)
+
+/* Video mode flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_FLAG_PHSYNC (1<<0)
+#define DRM_MODE_FLAG_NHSYNC (1<<1)
+#define DRM_MODE_FLAG_PVSYNC (1<<2)
+#define DRM_MODE_FLAG_NVSYNC (1<<3)
+#define DRM_MODE_FLAG_INTERLACE (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN (1<<5)
+#define DRM_MODE_FLAG_CSYNC (1<<6)
+#define DRM_MODE_FLAG_PCSYNC (1<<7)
+#define DRM_MODE_FLAG_NCSYNC (1<<8)
+#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST (1<<10)
+#define DRM_MODE_FLAG_PIXMUX (1<<11)
+#define DRM_MODE_FLAG_DBLCLK (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+ /*
+ * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
+ * (define not exposed to user space).
+ */
+#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
+#define DRM_MODE_FLAG_3D_NONE (0<<14)
+#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
+#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
+#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
+#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+
+
+/* DPMS flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_DPMS_ON 0
+#define DRM_MODE_DPMS_STANDBY 1
+#define DRM_MODE_DPMS_SUSPEND 2
+#define DRM_MODE_DPMS_OFF 3
+
+/* Scaling mode options */
+#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
+ software can still scale) */
+#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
+#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
+#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
+
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE 0
+#define DRM_MODE_PICTURE_ASPECT_4_3 1
+#define DRM_MODE_PICTURE_ASPECT_16_9 2
+
+/* Dithering mode options */
+#define DRM_MODE_DITHERING_OFF 0
+#define DRM_MODE_DITHERING_ON 1
+#define DRM_MODE_DITHERING_AUTO 2
+
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF 0
+#define DRM_MODE_DIRTY_ON 1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
+struct drm_mode_modeinfo {
+ __u32 clock;
+ __u16 hdisplay;
+ __u16 hsync_start;
+ __u16 hsync_end;
+ __u16 htotal;
+ __u16 hskew;
+ __u16 vdisplay;
+ __u16 vsync_start;
+ __u16 vsync_end;
+ __u16 vtotal;
+ __u16 vscan;
+
+ __u32 vrefresh;
+
+ __u32 flags;
+ __u32 type;
+ char name[DRM_DISPLAY_MODE_LEN];
+};
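
/*
 * Sketch: vrefresh is redundant with the timing fields; for a simple
 * (non-interlaced, non-doublescan) mode the rate in Hz is
 * clock [kHz] * 1000 / (htotal * vtotal), rounded to nearest here.
 */
static __u32 mode_refresh_hz(const struct drm_mode_modeinfo *m)
{
	__u64 num = (__u64)m->clock * 1000;
	__u64 den = (__u64)m->htotal * m->vtotal;

	return den ? (__u32)((num + den / 2) / den) : 0;
}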
+
+struct drm_mode_card_res {
+ __u64 fb_id_ptr;
+ __u64 crtc_id_ptr;
+ __u64 connector_id_ptr;
+ __u64 encoder_id_ptr;
+ __u32 count_fbs;
+ __u32 count_crtcs;
+ __u32 count_connectors;
+ __u32 count_encoders;
+ __u32 min_width;
+ __u32 max_width;
+ __u32 min_height;
+ __u32 max_height;
+};
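
/*
 * Sketch of the usual two-pass GETRESOURCES call: once with zeroed counts
 * to learn how many ids exist, then again with an array hooked up through
 * the matching *_ptr field. DRM_IOCTL_MODE_GETRESOURCES is defined in
 * drm.h; a real implementation retries when the counts grow between the
 * two calls, and error handling is elided.
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int get_connector_ids(int fd, __u32 **ids_out, __u32 *count_out)
{
	struct drm_mode_card_res res = {0};

	if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
		return -1;
	*ids_out = calloc(res.count_connectors, sizeof(__u32));
	if (!*ids_out)
		return -1;
	res.connector_id_ptr = (__u64)(uintptr_t)*ids_out;
	/* zero the counts we are not interested in so the kernel skips
	 * those arrays on the second pass */
	res.count_fbs = res.count_crtcs = res.count_encoders = 0;
	if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
		return -1;
	*count_out = res.count_connectors;
	return 0;
}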
+
+struct drm_mode_crtc {
+ __u64 set_connectors_ptr;
+ __u32 count_connectors;
+
+ __u32 crtc_id; /**< Id */
+ __u32 fb_id; /**< Id of framebuffer */
+
+ __u32 x; /**< x Position on the framebuffer */
+ __u32 y; /**< y Position on the framebuffer */
+
+ __u32 gamma_size;
+ __u32 mode_valid;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
+#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
+
+/* Planes blend with or override other bits on the CRTC */
+struct drm_mode_set_plane {
+ __u32 plane_id;
+ __u32 crtc_id;
+ __u32 fb_id; /* fb object contains surface format type */
+ __u32 flags; /* see above flags */
+
+ /* Signed dest location allows it to be partially off screen */
+ __s32 crtc_x;
+ __s32 crtc_y;
+ __u32 crtc_w;
+ __u32 crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ __u32 src_x;
+ __u32 src_y;
+ __u32 src_h;
+ __u32 src_w;
+};
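
/*
 * Sketch: src_* values are 16.16 fixed point while crtc_* values are whole
 * pixels, so scanning out a full w x h buffer sets src_w = w << 16 and
 * src_h = h << 16.
 */
static __u32 to_fixed_16_16(__u32 pixels)
{
	return pixels << 16;	/* e.g. 1920 becomes 0x07800000 */
}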
+
+struct drm_mode_get_plane {
+ __u32 plane_id;
+
+ __u32 crtc_id;
+ __u32 fb_id;
+
+ __u32 possible_crtcs;
+ __u32 gamma_size;
+
+ __u32 count_format_types;
+ __u64 format_type_ptr;
+};
+
+struct drm_format_modifier {
+ /* Bitmask of formats in get_plane format list this info
+ * applies to. */
+ __u64 formats;
+
+ /* This modifier can be used with the format for this plane. */
+ __u64 modifier;
+};
+
+struct drm_mode_get_plane2 {
+ __u32 plane_id;
+
+ __u32 crtc_id;
+ __u32 fb_id;
+
+ __u32 possible_crtcs;
+ __u32 gamma_size;
+
+ __u32 count_format_types;
+ __u64 format_type_ptr;
+
+ /* New in v2 */
+ __u32 count_format_modifiers;
+ __u32 flags;
+ __u64 format_modifier_ptr;
+};
+
+struct drm_mode_get_plane_res {
+ __u64 plane_id_ptr;
+ __u32 count_planes;
+};
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI 6
+#define DRM_MODE_ENCODER_DPMST 7
+
+struct drm_mode_get_encoder {
+ __u32 encoder_id;
+ __u32 encoder_type;
+
+ __u32 crtc_id; /**< Id of crtc */
+
+ __u32 possible_crtcs;
+ __u32 possible_clones;
+};
+
+/* This is for connectors with multiple signal types. */
+/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
+#define DRM_MODE_SUBCONNECTOR_Automatic 0
+#define DRM_MODE_SUBCONNECTOR_Unknown 0
+#define DRM_MODE_SUBCONNECTOR_DVID 3
+#define DRM_MODE_SUBCONNECTOR_DVIA 4
+#define DRM_MODE_SUBCONNECTOR_Composite 5
+#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
+#define DRM_MODE_SUBCONNECTOR_Component 8
+#define DRM_MODE_SUBCONNECTOR_SCART 9
+
+#define DRM_MODE_CONNECTOR_Unknown 0
+#define DRM_MODE_CONNECTOR_VGA 1
+#define DRM_MODE_CONNECTOR_DVII 2
+#define DRM_MODE_CONNECTOR_DVID 3
+#define DRM_MODE_CONNECTOR_DVIA 4
+#define DRM_MODE_CONNECTOR_Composite 5
+#define DRM_MODE_CONNECTOR_SVIDEO 6
+#define DRM_MODE_CONNECTOR_LVDS 7
+#define DRM_MODE_CONNECTOR_Component 8
+#define DRM_MODE_CONNECTOR_9PinDIN 9
+#define DRM_MODE_CONNECTOR_DisplayPort 10
+#define DRM_MODE_CONNECTOR_HDMIA 11
+#define DRM_MODE_CONNECTOR_HDMIB 12
+#define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
+#define DRM_MODE_CONNECTOR_VIRTUAL 15
+#define DRM_MODE_CONNECTOR_DSI 16
+
+struct drm_mode_get_connector {
+
+ __u64 encoders_ptr;
+ __u64 modes_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+
+ __u32 count_modes;
+ __u32 count_props;
+ __u32 count_encoders;
+
+ __u32 encoder_id; /**< Current Encoder */
+ __u32 connector_id; /**< Id */
+ __u32 connector_type;
+ __u32 connector_type_id;
+
+ __u32 connection;
+ __u32 mm_width; /**< width in millimeters */
+ __u32 mm_height; /**< height in millimeters */
+ __u32 subpixel;
+
+ __u32 pad;
+};
+
+#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
+#define DRM_MODE_PROP_BLOB (1<<4)
+#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
+
+/* non-extended types: legacy bitmask, one bit per type: */
+#define DRM_MODE_PROP_LEGACY_TYPE ( \
+ DRM_MODE_PROP_RANGE | \
+ DRM_MODE_PROP_ENUM | \
+ DRM_MODE_PROP_BLOB | \
+ DRM_MODE_PROP_BITMASK)
+
+/* extended-types: rather than continue to consume a bit per type,
+ * grab a chunk of the bits to use as integer type id.
+ */
+#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
+#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
+#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
+#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
+
+/* the PROP_ATOMIC flag is used to hide properties from userspace that
+ * is not aware of atomic properties. This is mostly to work around
+ * older userspace (DDX drivers) that read/write each prop they find,
+ * without being aware that this could be triggering a lengthy modeset.
+ */
+#define DRM_MODE_PROP_ATOMIC 0x80000000
+
+struct drm_mode_property_enum {
+ __u64 value;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_mode_get_property {
+ __u64 values_ptr; /* values and blob lengths */
+ __u64 enum_blob_ptr; /* enum and blob id ptrs */
+
+ __u32 prop_id;
+ __u32 flags;
+ char name[DRM_PROP_NAME_LEN];
+
+ __u32 count_values;
+	/* This is only used to count enum values, not blobs. The _blobs in
+	 * the name remains only for historical (backwards compatibility)
+	 * reasons. */
+ __u32 count_enum_blobs;
+};
+
+struct drm_mode_connector_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 connector_id;
+};
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+#define DRM_MODE_OBJECT_ANY 0
+
+struct drm_mode_obj_get_properties {
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u32 count_props;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_obj_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_get_blob {
+ __u32 blob_id;
+ __u32 length;
+ __u64 data;
+};
+
+struct drm_mode_fb_cmd {
+ __u32 fb_id;
+ __u32 width;
+ __u32 height;
+ __u32 pitch;
+ __u32 bpp;
+ __u32 depth;
+ /* driver specific handle */
+ __u32 handle;
+};
+
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
+
+struct drm_mode_fb_cmd2 {
+ __u32 fb_id;
+ __u32 width;
+ __u32 height;
+ __u32 pixel_format; /* fourcc code from drm_fourcc.h */
+ __u32 flags; /* see above flags */
+
+ /*
+ * In case of planar formats, this ioctl allows up to 4
+ * buffer objects with offsets and pitches per plane.
+ * The pitch and offset order is dictated by the fourcc,
+ * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8 bit Y samples
+ * followed by an interleaved U/V plane containing
+ * 8 bit 2x2 subsampled colour difference samples.
+ *
+ * So it would consist of Y as offsets[0] and UV as
+ * offsets[1]. Note that offsets[0] will generally
+ * be 0 (but this is not required).
+ *
+	 * To accommodate tiled, compressed, etc. formats, a per-plane
+	 * modifier can be specified. The default value of zero
+	 * indicates a "native" format as specified by the fourcc.
+	 * The modifier is a vendor-specific token; it allows, for example,
+	 * different tiling/swizzling patterns on different planes.
+	 * See the discussion of DRM_FORMAT_MOD_xxx above.
+ */
+ __u32 handles[4];
+ __u32 pitches[4]; /* pitch for each plane */
+ __u32 offsets[4]; /* offset of each plane */
+	__u64 modifier[4]; /* i.e. tiling, compressed (per plane) */
+};
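
For the NV12 case described in the comment, the request could be filled like this (a sketch ignoring pitch-alignment requirements; bo_handle, w and h are assumed, both planes share one buffer object, DRM_FORMAT_NV12 and DRM_IOCTL_MODE_ADDFB2 come from drm_fourcc.h and drm.h):

	struct drm_mode_fb_cmd2 fb = {
		.width        = w,
		.height       = h,
		.pixel_format = DRM_FORMAT_NV12,
		.handles      = { bo_handle, bo_handle },
		.pitches      = { w, w },      /* Y rows, then interleaved UV rows */
		.offsets      = { 0, w * h },  /* UV plane starts right after Y */
	};
	ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &fb);  /* returns fb.fb_id on success */
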
+
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+
+#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * as hardware or software draws to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more than just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates; an annotation is a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
+ * the number of updated regions is half the num_clips given,
+ * where the clip rects are paired as src and dst. The width and
+ * height of each pair must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given, the caller
+ * promises that the regions specified by the clip rects are filled
+ * completely with a single color as given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+ __u32 fb_id;
+ __u32 flags;
+ __u32 color;
+ __u32 num_clips;
+ __u64 clips_ptr;
+};
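
As an illustration of the fill annotation above, a caller that has just painted one rectangle a single color could report it like so (a sketch; fd and fb_id assumed, DRM_IOCTL_MODE_DIRTYFB from drm.h):

	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
	struct drm_mode_fb_dirty_cmd dirty = {
		.fb_id     = fb_id,
		.flags     = DRM_MODE_FB_DIRTY_ANNOTATE_FILL,
		.color     = 0x00ff0000,             /* the promised single color */
		.num_clips = 1,
		.clips_ptr = (uint64_t)(uintptr_t)&clip,
	};
	ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
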
+
+struct drm_mode_mode_cmd {
+ __u32 connector_id;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_CURSOR_BO 0x01
+#define DRM_MODE_CURSOR_MOVE 0x02
+#define DRM_MODE_CURSOR_FLAGS 0x03
+
+/*
+ * Depending on the value of flags, different members are used.
+ *
+ * CURSOR_BO uses
+ * crtc_id
+ * width
+ * height
+ * handle - if 0 turns the cursor off
+ *
+ * CURSOR_MOVE uses
+ * crtc_id
+ * x
+ * y
+ */
+struct drm_mode_cursor {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+};
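
Per the comment above, one struct serves both operations; a sketch of installing and then moving a 64x64 cursor (fd, crtc_id and cursor_bo assumed, DRM_IOCTL_MODE_CURSOR from drm.h):

	struct drm_mode_cursor cur = {
		.flags   = DRM_MODE_CURSOR_BO,
		.crtc_id = crtc_id,
		.width   = 64,
		.height  = 64,
		.handle  = cursor_bo,   /* a handle of 0 would turn the cursor off */
	};
	ioctl(fd, DRM_IOCTL_MODE_CURSOR, &cur);

	cur.flags = DRM_MODE_CURSOR_MOVE;   /* now only crtc_id, x and y are read */
	cur.x = 100;
	cur.y = 200;
	ioctl(fd, DRM_IOCTL_MODE_CURSOR, &cur);
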
+
+struct drm_mode_cursor2 {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+ __s32 hot_x;
+ __s32 hot_y;
+};
+
+struct drm_mode_crtc_lut {
+ __u32 crtc_id;
+ __u32 gamma_size;
+
+ /* pointers to arrays */
+ __u64 red;
+ __u64 green;
+ __u64 blue;
+};
+
+struct drm_color_ctm {
+ /* Conversion matrix in S31.32 format. */
+ __s64 matrix[9];
+};
+
+struct drm_color_lut {
+ /*
+ * Data is U0.16 fixed point format.
+ */
+ __u16 red;
+ __u16 green;
+ __u16 blue;
+ __u16 reserved;
+};
+
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc. Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh. The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens. If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
+ * event (see drm.h: struct drm_event_vblank) when the page flip is
+ * done. The user_data field passed in with this ioctl will be
+ * returned as the user_data field in the vblank event struct.
+ *
+ * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
+ * 'as soon as possible', meaning that it will not delay waiting for vblank.
+ * This may cause tearing on the screen.
+ *
+ * The reserved field must be zero until we figure out something
+ * clever to use it for.
+ */
+
+struct drm_mode_crtc_page_flip {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 reserved;
+ __u64 user_data;
+};
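
A flip with a completion event, as described above, might be requested like this; the kernel later delivers a struct drm_event_vblank on the fd carrying user_data back (a sketch; fd, crtc_id, next_fb_id and my_state assumed, DRM_IOCTL_MODE_PAGE_FLIP from drm.h):

	struct drm_mode_crtc_page_flip flip = {
		.crtc_id   = crtc_id,
		.fb_id     = next_fb_id,
		.flags     = DRM_MODE_PAGE_FLIP_EVENT,
		.user_data = (uint64_t)(uintptr_t)my_state,
	};
	if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) < 0 && errno == EBUSY) {
		/* a flip is already pending on this crtc; retry after the event */
	}
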
+
+/* create a dumb scanout buffer */
+struct drm_mode_create_dumb {
+ __u32 height;
+ __u32 width;
+ __u32 bpp;
+ __u32 flags;
+ /* handle, pitch, size will be returned */
+ __u32 handle;
+ __u32 pitch;
+ __u64 size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_mode_destroy_dumb {
+ __u32 handle;
+};
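
The three dumb-buffer requests compose into the usual create/map/mmap/destroy sequence (a sketch assuming the corresponding DRM_IOCTL_MODE_*_DUMB ioctls from drm.h; includes and error handling omitted):

	struct drm_mode_create_dumb create = { .width = 1024, .height = 768, .bpp = 32 };
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);   /* fills handle/pitch/size */

	struct drm_mode_map_dumb map = { .handle = create.handle };
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);         /* fills the fake offset */

	void *pixels = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, map.offset);

	/* ... draw, scan out ... then tear down: */
	munmap(pixels, create.size);
	struct drm_mode_destroy_dumb destroy = { .handle = create.handle };
	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
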
+
+/* page-flip flags are valid, plus: */
+#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
+#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
+#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
+
+#define DRM_MODE_ATOMIC_FLAGS (\
+ DRM_MODE_PAGE_FLIP_EVENT |\
+ DRM_MODE_PAGE_FLIP_ASYNC |\
+ DRM_MODE_ATOMIC_TEST_ONLY |\
+ DRM_MODE_ATOMIC_NONBLOCK |\
+ DRM_MODE_ATOMIC_ALLOW_MODESET)
+
+struct drm_mode_atomic {
+ __u32 flags;
+ __u32 count_objs;
+ __u64 objs_ptr;
+ __u64 count_props_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u64 reserved;
+ __u64 user_data;
+};
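
The four pointers describe parallel, nested arrays: objs_ptr[i] names an object, count_props_ptr[i] says how many consecutive entries of the flattened props_ptr/prop_values_ptr arrays belong to it. Setting one property on each of two objects might look like this (a sketch; the ids are assumed, DRM_IOCTL_MODE_ATOMIC from drm.h):

	uint32_t objs[2]        = { plane_id, crtc_id };
	uint32_t count_props[2] = { 1, 1 };
	uint32_t props[2]       = { plane_fb_prop, crtc_active_prop };
	uint64_t values[2]      = { fb_id, 1 };

	struct drm_mode_atomic req = {
		.flags           = DRM_MODE_ATOMIC_NONBLOCK,
		.count_objs      = 2,
		.objs_ptr        = (uint64_t)(uintptr_t)objs,
		.count_props_ptr = (uint64_t)(uintptr_t)count_props,
		.props_ptr       = (uint64_t)(uintptr_t)props,
		.prop_values_ptr = (uint64_t)(uintptr_t)values,
	};
	ioctl(fd, DRM_IOCTL_MODE_ATOMIC, &req);
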
+
+/**
+ * Create a new 'blob' data property, copying length bytes from data pointer,
+ * and returning new blob ID.
+ */
+struct drm_mode_create_blob {
+ /** Pointer to data to copy. */
+ __u64 data;
+ /** Length of data to copy. */
+ __u32 length;
+ /** Return: new property ID. */
+ __u32 blob_id;
+};
+
+/**
+ * Destroy a user-created blob property.
+ */
+struct drm_mode_destroy_blob {
+ __u32 blob_id;
+};
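
Blobs carry variable-sized property data (a mode, a gamma LUT) by id; a sketch of wrapping a struct drm_mode_modeinfo and releasing it again (DRM_IOCTL_MODE_CREATEPROPBLOB / DRM_IOCTL_MODE_DESTROYPROPBLOB from drm.h assumed):

	struct drm_mode_create_blob cb = {
		.data   = (uint64_t)(uintptr_t)&mode,   /* a struct drm_mode_modeinfo */
		.length = sizeof(mode),
	};
	ioctl(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &cb);
	/* ... use cb.blob_id as the value of a blob-typed property ... */
	struct drm_mode_destroy_blob db = { .blob_id = cb.blob_id };
	ioctl(fd, DRM_IOCTL_MODE_DESTROYPROPBLOB, &db);
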
+
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/drm_sarea.h b/chromium/third_party/libdrm/src/include/drm/drm_sarea.h
new file mode 100644
index 00000000000..502934edc21
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/drm_sarea.h
@@ -0,0 +1,84 @@
+/**
+ * \file drm_sarea.h
+ * \brief SAREA definitions
+ *
+ * \author Michel Dänzer <michel@daenzer.net>
+ */
+
+/*
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_SAREA_H_
+#define _DRM_SAREA_H_
+
+#include "drm.h"
+
+/* The SAREA needs to be at least a page */
+#if defined(__alpha__)
+#define SAREA_MAX 0x2000U
+#elif defined(__mips__)
+#define SAREA_MAX 0x4000U
+#elif defined(__ia64__)
+#define SAREA_MAX 0x10000U /* 64kB */
+#else
+/* Intel 830M driver needs at least 8k SAREA */
+#define SAREA_MAX 0x2000U
+#endif
+
+/** Maximum number of drawables in the SAREA */
+#define SAREA_MAX_DRAWABLES 256
+
+#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
+
+/** SAREA drawable */
+struct drm_sarea_drawable {
+ unsigned int stamp;
+ unsigned int flags;
+};
+
+/** SAREA frame */
+struct drm_sarea_frame {
+ unsigned int x;
+ unsigned int y;
+ unsigned int width;
+ unsigned int height;
+ unsigned int fullscreen;
+};
+
+/** SAREA */
+struct drm_sarea {
+ /** first thing is always the DRM locking structure */
+ struct drm_hw_lock lock;
+ /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
+ struct drm_hw_lock drawable_lock;
+ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
+ struct drm_sarea_frame frame; /**< frame */
+ drm_context_t dummy_context;
+};
+
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
+
+#endif /* _DRM_SAREA_H_ */
diff --git a/chromium/third_party/libdrm/src/include/drm/evdi_drm.h b/chromium/third_party/libdrm/src/include/drm/evdi_drm.h
new file mode 100644
index 00000000000..3a00c7fcea8
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/evdi_drm.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015 - 2016 DisplayLink (UK) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#ifndef __UAPI_EVDI_DRM_H__
+#define __UAPI_EVDI_DRM_H__
+
+/* Output events sent from driver to evdi lib */
+#define DRM_EVDI_EVENT_UPDATE_READY 0x80000000
+#define DRM_EVDI_EVENT_DPMS 0x80000001
+#define DRM_EVDI_EVENT_MODE_CHANGED 0x80000002
+#define DRM_EVDI_EVENT_CRTC_STATE 0x80000003
+
+struct drm_evdi_event_update_ready {
+ struct drm_event base;
+};
+
+struct drm_evdi_event_dpms {
+ struct drm_event base;
+ int32_t mode;
+};
+
+struct drm_evdi_event_mode_changed {
+ struct drm_event base;
+ int32_t hdisplay;
+ int32_t vdisplay;
+ int32_t vrefresh;
+ int32_t bits_per_pixel;
+ uint32_t pixel_format;
+};
+
+struct drm_evdi_event_crtc_state {
+ struct drm_event base;
+ int32_t state;
+};
+
+struct drm_evdi_connect {
+ int32_t connected;
+ int32_t dev_index;
+ const unsigned char * __user edid;
+ unsigned int edid_length;
+};
+
+struct drm_evdi_mapfifo {
+ int32_t reserved;
+};
+
+struct drm_evdi_request_update {
+ int32_t reserved;
+};
+
+enum drm_evdi_grabpix_mode {
+ EVDI_GRABPIX_MODE_RECTS = 0,
+ EVDI_GRABPIX_MODE_DIRTY = 1,
+};
+
+struct drm_evdi_grabpix {
+ enum drm_evdi_grabpix_mode mode;
+ int32_t buf_width;
+ int32_t buf_height;
+ int32_t buf_byte_stride;
+ unsigned char __user *buffer;
+ int32_t num_rects;
+ struct drm_clip_rect __user *rects;
+};
+
+/* Input ioctls from evdi lib to driver */
+#define DRM_EVDI_CONNECT 0x00
+#define DRM_EVDI_REQUEST_UPDATE 0x01
+#define DRM_EVDI_GRABPIX 0x02
+/* LAST_IOCTL 0x5F -- 96 driver specific ioctls to use */
+
+#define DRM_IOCTL_EVDI_CONNECT DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EVDI_CONNECT, struct drm_evdi_connect)
+#define DRM_IOCTL_EVDI_REQUEST_UPDATE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EVDI_REQUEST_UPDATE, struct drm_evdi_request_update)
+#define DRM_IOCTL_EVDI_GRABPIX DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EVDI_GRABPIX, struct drm_evdi_grabpix)
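
A usage sketch of the connect ioctl defined above (an open evdi node fd and an EDID block are assumed; error handling omitted):

	unsigned char edid[128] = { /* a valid EDID block */ };
	struct drm_evdi_connect conn = {
		.connected   = 1,
		.dev_index   = 0,
		.edid        = edid,
		.edid_length = sizeof(edid),
	};
	ioctl(fd, DRM_IOCTL_EVDI_CONNECT, &conn);
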
+
+#endif /* __UAPI_EVDI_DRM_H__ */
+
diff --git a/chromium/third_party/libdrm/src/include/drm/i915_drm.h b/chromium/third_party/libdrm/src/include/drm/i915_drm.h
new file mode 100644
index 00000000000..c4ce6b2c6f8
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/i915_drm.h
@@ -0,0 +1,1173 @@
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
+
+#include "drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+/**
+ * DOC: uevents generated by i915 on its device node
+ *
+ * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
+ * event from the gpu l3 cache. Additional information supplied is ROW,
+ * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ * track of these events and if a specific cache-line seems to have a
+ * persistent error remap it with the l3 remapping tool supplied in
+ * intel-gpu-tools. The value supplied with the event is always 1.
+ *
+ * I915_ERROR_UEVENT - Generated upon error detection, currently only via
+ * hangcheck. The error detection event is a good indicator of when things
+ * began to go badly. The value supplied with the event is a 1 upon error
+ * detection, and a 0 upon reset completion, signifying no more error
+ * exists. NOTE: Disabling hangcheck or reset via module parameter will
+ * cause the related events to not be seen.
+ *
+ * I915_RESET_UEVENT - Event is generated just before an attempt to reset
+ * the GPU. The value supplied with the event is always 1. NOTE: Disabling
+ * reset via module parameter will cause this event to not be seen.
+ */
+#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
+#define I915_ERROR_UEVENT "ERROR"
+#define I915_RESET_UEVENT "RESET"
+
+/* Each region is a minimum of 16k, and there are at most 255 of them.
+ */
+#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
+ * of chars for next/prev indices */
+#define I915_LOG_MIN_TEX_REGION_SIZE 14
+
+typedef struct _drm_i915_init {
+ enum {
+ I915_INIT_DMA = 0x01,
+ I915_CLEANUP_DMA = 0x02,
+ I915_RESUME_DMA = 0x03
+ } func;
+ unsigned int mmio_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+ unsigned int chipset;
+} drm_i915_init_t;
+
+typedef struct _drm_i915_sarea {
+ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
+ int last_upload; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int ctxOwner; /* last context to upload state */
+ int texAge;
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+ int perf_boxes; /* performance boxes to be displayed */
+ int width, height; /* screen size in pixels */
+
+ drm_handle_t front_handle;
+ int front_offset;
+ int front_size;
+
+ drm_handle_t back_handle;
+ int back_offset;
+ int back_size;
+
+ drm_handle_t depth_handle;
+ int depth_offset;
+ int depth_size;
+
+ drm_handle_t tex_handle;
+ int tex_offset;
+ int tex_size;
+ int log_tex_granularity;
+ int pitch;
+ int rotation; /* 0, 90, 180 or 270 */
+ int rotated_offset;
+ int rotated_size;
+ int rotated_pitch;
+ int virtualX, virtualY;
+
+ unsigned int front_tiled;
+ unsigned int back_tiled;
+ unsigned int depth_tiled;
+ unsigned int rotated_tiled;
+ unsigned int rotated2_tiled;
+
+ int pipeA_x;
+ int pipeA_y;
+ int pipeA_w;
+ int pipeA_h;
+ int pipeB_x;
+ int pipeB_y;
+ int pipeB_w;
+ int pipeB_h;
+
+ /* fill out some space for old userspace triple buffer */
+ drm_handle_t unused_handle;
+ __u32 unused1, unused2, unused3;
+
+ /* buffer object handles for static buffers. May change
+ * over the lifetime of the client.
+ */
+ __u32 front_bo_handle;
+ __u32 back_bo_handle;
+ __u32 unused_bo_handle;
+ __u32 depth_bo_handle;
+
+} drm_i915_sarea_t;
+
+/* due to userspace building against these headers we need some compat here */
+#define planeA_x pipeA_x
+#define planeA_y pipeA_y
+#define planeA_w pipeA_w
+#define planeA_h pipeA_h
+#define planeB_x pipeB_x
+#define planeB_y pipeB_y
+#define planeB_w pipeB_w
+#define planeB_h pipeB_h
+
+/* Flags for perf_boxes
+ */
+#define I915_BOX_RING_EMPTY 0x1
+#define I915_BOX_FLIP 0x2
+#define I915_BOX_WAIT 0x4
+#define I915_BOX_TEXTURE_LOAD 0x8
+#define I915_BOX_LOST_CONTEXT 0x10
+
+/*
+ * i915 specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
+ * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
+ * against DRM_COMMAND_BASE and should be within [0x0, 0x60).
+ */
+#define DRM_I915_INIT 0x00
+#define DRM_I915_FLUSH 0x01
+#define DRM_I915_FLIP 0x02
+#define DRM_I915_BATCHBUFFER 0x03
+#define DRM_I915_IRQ_EMIT 0x04
+#define DRM_I915_IRQ_WAIT 0x05
+#define DRM_I915_GETPARAM 0x06
+#define DRM_I915_SETPARAM 0x07
+#define DRM_I915_ALLOC 0x08
+#define DRM_I915_FREE 0x09
+#define DRM_I915_INIT_HEAP 0x0a
+#define DRM_I915_CMDBUFFER 0x0b
+#define DRM_I915_DESTROY_HEAP 0x0c
+#define DRM_I915_SET_VBLANK_PIPE 0x0d
+#define DRM_I915_GET_VBLANK_PIPE 0x0e
+#define DRM_I915_VBLANK_SWAP 0x0f
+#define DRM_I915_HWS_ADDR 0x11
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_ENTERVT 0x19
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
+#define DRM_I915_GEM_SW_FINISH 0x20
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT 0x24
+#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
+#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_GEM_EXECBUFFER2 0x29
+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GEM_WAIT 0x2c
+#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
+#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
+#define DRM_I915_GEM_SET_CACHING 0x2f
+#define DRM_I915_GEM_GET_CACHING 0x30
+#define DRM_I915_REG_READ 0x31
+#define DRM_I915_GET_RESET_STATS 0x32
+#define DRM_I915_GEM_USERPTR 0x33
+#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
+#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
+
+#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
+#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
+#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
+#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
+#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
+#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
+#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
+#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
+#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
+#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
+#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
+#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
+#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
+
+/* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+ */
+typedef struct drm_i915_batchbuffer {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer_t;
+
+/* As above, but pass a pointer to userspace buffer which can be
+ * validated by the kernel prior to sending to hardware.
+ */
+typedef struct _drm_i915_cmdbuffer {
+ char *buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer_t;
+
+/* Userspace can request & wait on irqs:
+ */
+typedef struct drm_i915_irq_emit {
+ int *irq_seq;
+} drm_i915_irq_emit_t;
+
+typedef struct drm_i915_irq_wait {
+ int irq_seq;
+} drm_i915_irq_wait_t;
+
+/* Ioctl to query kernel params:
+ */
+#define I915_PARAM_IRQ_ACTIVE 1
+#define I915_PARAM_ALLOW_BATCHBUFFER 2
+#define I915_PARAM_LAST_DISPATCH 3
+#define I915_PARAM_CHIPSET_ID 4
+#define I915_PARAM_HAS_GEM 5
+#define I915_PARAM_NUM_FENCES_AVAIL 6
+#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_HAS_PAGEFLIPPING 8
+#define I915_PARAM_HAS_EXECBUF2 9
+#define I915_PARAM_HAS_BSD 10
+#define I915_PARAM_HAS_BLT 11
+#define I915_PARAM_HAS_RELAXED_FENCING 12
+#define I915_PARAM_HAS_COHERENT_RINGS 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS 14
+#define I915_PARAM_HAS_RELAXED_DELTA 15
+#define I915_PARAM_HAS_GEN7_SOL_RESET 16
+#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_ALIASING_PPGTT 18
+#define I915_PARAM_HAS_WAIT_TIMEOUT 19
+#define I915_PARAM_HAS_SEMAPHORES 20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
+#define I915_PARAM_HAS_VEBOX 22
+#define I915_PARAM_HAS_SECURE_BATCHES 23
+#define I915_PARAM_HAS_PINNED_BATCHES 24
+#define I915_PARAM_HAS_EXEC_NO_RELOC 25
+#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
+#define I915_PARAM_HAS_WT 27
+#define I915_PARAM_CMD_PARSER_VERSION 28
+#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
+#define I915_PARAM_MMAP_VERSION 30
+#define I915_PARAM_HAS_BSD2 31
+#define I915_PARAM_REVISION 32
+#define I915_PARAM_SUBSLICE_TOTAL 33
+#define I915_PARAM_EU_TOTAL 34
+#define I915_PARAM_HAS_GPU_RESET 35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
+#define I915_PARAM_HAS_EXEC_SOFTPIN 37
+
+typedef struct drm_i915_getparam {
+ __s32 param;
+ /*
+ * WARNING: Using pointers instead of fixed-size u64 means we need to write
+ * compat32 code. Don't repeat this mistake.
+ */
+ int *value;
+} drm_i915_getparam_t;
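
Despite the pointer-width warning, the established usage pattern looks like this (a sketch; fd assumed, the ioctl macro is defined above):

	int chipset_id = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = &chipset_id,   /* the kernel writes the result here */
	};
	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
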
+
+/* Ioctl to set kernel params:
+ */
+#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
+#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
+#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+#define I915_SETPARAM_NUM_USED_FENCES 4
+
+typedef struct drm_i915_setparam {
+ int param;
+ int value;
+} drm_i915_setparam_t;
+
+/* A memory manager for regions of shared memory:
+ */
+#define I915_MEM_REGION_AGP 1
+
+typedef struct drm_i915_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int *region_offset; /* offset from start of fb or agp */
+} drm_i915_mem_alloc_t;
+
+typedef struct drm_i915_mem_free {
+ int region;
+ int region_offset;
+} drm_i915_mem_free_t;
+
+typedef struct drm_i915_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_i915_mem_init_heap_t;
+
+/* Allow memory manager to be torn down and re-initialized (e.g. on
+ * rotate):
+ */
+typedef struct drm_i915_mem_destroy_heap {
+ int region;
+} drm_i915_mem_destroy_heap_t;
+
+/* Allow X server to configure which pipes to monitor for vblank signals
+ */
+#define DRM_I915_VBLANK_PIPE_A 1
+#define DRM_I915_VBLANK_PIPE_B 2
+
+typedef struct drm_i915_vblank_pipe {
+ int pipe;
+} drm_i915_vblank_pipe_t;
+
+/* Schedule buffer swap at given vertical blank:
+ */
+typedef struct drm_i915_vblank_swap {
+ drm_drawable_t drawable;
+ enum drm_vblank_seq_type seqtype;
+ unsigned int sequence;
+} drm_i915_vblank_swap_t;
+
+typedef struct drm_i915_hws_addr {
+ __u64 addr;
+} drm_i915_hws_addr_t;
+
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ __u64 gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ __u64 gtt_end;
+};
+
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ __u64 size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to read from */
+ __u64 offset;
+ /** Length of data to read */
+ __u64 size;
+ /**
+ * Pointer to write the data into.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to write to */
+ __u64 offset;
+ /** Length of data to write */
+ __u64 size;
+ /**
+ * Pointer to read the data from.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset in the object to map. */
+ __u64 offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ __u64 size;
+ /**
+ * Returned pointer the data was mapped at.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 addr_ptr;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * Added in version 2.
+ */
+ __u64 flags;
+#define I915_MMAP_WC 0x1
+};
+
+struct drm_i915_gem_mmap_gtt {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ __u32 handle;
+
+ /** New read domains */
+ __u32 read_domains;
+
+ /** New write domain */
+ __u32 write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+ /** Handle for the object */
+ __u32 handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+	 * It's appealing to make this an index into the mm_validate_entry
+ * list to refer to the buffer, but this allows the driver to create
+ * a relocation list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ __u32 target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ __u32 delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ __u64 offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ __u64 presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ __u32 read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ __u32 write_domain;
+};
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT 0x00000040
+/** @} */
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ __u64 offset;
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+	 * performed on them.
+ *
+ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ __u64 buffers_ptr;
+ __u32 buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ __u32 batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
+};
+
+struct drm_i915_gem_exec_object2 {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * When the EXEC_OBJECT_PINNED flag is specified this is populated by
+ * the user with the GTT offset at which this object will be pinned.
+ * When the I915_EXEC_NO_RELOC flag is specified this must contain the
+ * presumed_offset of the object.
+ * During execbuffer2 the kernel populates it with the value of the
+ * current GTT offset of the object, for future presumed_offset writes.
+ */
+ __u64 offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT (1<<1)
+#define EXEC_OBJECT_WRITE (1<<2)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+#define EXEC_OBJECT_PINNED (1<<4)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
+ __u64 flags;
+
+ __u64 rsvd1;
+ __u64 rsvd2;
+};
+
+struct drm_i915_gem_execbuffer2 {
+ /**
+ * List of gem_exec_object2 structs
+ */
+ __u64 buffers_ptr;
+ __u32 buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ __u32 batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
+#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_DEFAULT (0<<0)
+#define I915_EXEC_RENDER (1<<0)
+#define I915_EXEC_BSD (2<<0)
+#define I915_EXEC_BLT (3<<0)
+#define I915_EXEC_VEBOX (4<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK (3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
+ __u64 flags;
+ __u64 rsvd1; /* now used for context info */
+ __u64 rsvd2;
+};
+
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE (1<<9)
+
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED (1<<10)
+
+/** Provide a hint to the kernel that the command stream and auxiliary
+ * state buffers already hold the correct presumed addresses and so the
+ * relocation process may be skipped if no buffers need to be moved in
+ * preparation for the execbuffer.
+ */
+#define I915_EXEC_NO_RELOC (1<<11)
+
+/** Use the reloc.handle as an index into the exec object array rather
+ * than as the per-file handle.
+ */
+#define I915_EXEC_HANDLE_LUT (1<<12)
+
+/** Used for switching BSD rings on the platforms with two BSD rings */
+#define I915_EXEC_BSD_SHIFT (13)
+#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
+/* default ping-pong mode */
+#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
+
+/** Tell the kernel that the batchbuffer is processed by
+ * the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER (1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
+
+#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
+#define i915_execbuffer2_set_context_id(eb2, context) \
+ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
+#define i915_execbuffer2_get_context_id(eb2) \
+ ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
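
These helpers stash the context id in rsvd1; a submission within a context created via DRM_IOCTL_I915_GEM_CONTEXT_CREATE might be sketched as (exec_objects, nobj, batch_bytes and ctx_id assumed):

	struct drm_i915_gem_execbuffer2 eb2 = {
		.buffers_ptr  = (uint64_t)(uintptr_t)exec_objects,
		.buffer_count = nobj,        /* last object is the batch buffer */
		.batch_len    = batch_bytes,
		.flags        = I915_EXEC_RENDER,
	};
	i915_execbuffer2_set_context_id(eb2, ctx_id);
	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb2);
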
+
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ __u32 handle;
+ __u32 pad;
+
+ /** alignment required within the aperture */
+ __u64 alignment;
+
+ /** Returned GTT offset of the buffer. */
+ __u64 offset;
+};
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ __u32 handle;
+
+ /** Return busy status
+ *
+ * A return of 0 implies that the object is idle (after
+ * having flushed any pending activity), and a non-zero return that
+ * the object is still in-flight on the GPU. (The GPU has not yet
+ * signaled completion for all pending requests that reference the
+ * object.)
+ *
+ * The returned dword is split into two fields to indicate both
+ * the engines on which the object is being read, and the
+ * engine on which it is currently being written (if any).
+ *
+	 * The low word (bits 0:15) indicates if the object is being written
+ * to by any engine (there can only be one, as the GEM implicit
+ * synchronisation rules force writes to be serialised). Only the
+ * engine for the last write is reported.
+ *
+	 * The high word (bits 16:31) is a bitmask of which engines are
+ * currently reading from the object. Multiple engines may be
+ * reading from the object simultaneously.
+ *
+ * The value of each engine is the same as specified in the
+ * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
+ * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
+ * the I915_EXEC_RENDER engine for execution, and so it is never
+ * reported as active itself. Some hardware may have parallel
+ * execution engines, e.g. multiple media engines, which are
+ * mapped to the same identifier in the EXECBUFFER2 ioctl and
+ * so are not separately reported for busyness.
+ */
+ __u32 busy;
+};
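
Decoding the split word described above (a sketch; fd and bo_handle assumed):

	struct drm_i915_gem_busy busy = { .handle = bo_handle };
	ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

	uint16_t writer  = busy.busy & 0xffff;   /* I915_EXEC_* id, 0 if none */
	uint16_t readers = busy.busy >> 16;      /* bitmask over I915_EXEC_* ids */
	if (readers & (1 << I915_EXEC_RENDER)) {
		/* the render engine is still reading the object */
	}
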
+
+/**
+ * I915_CACHING_NONE
+ *
+ * GPU access is not coherent with cpu caches. Default for machines without an
+ * LLC.
+ */
+#define I915_CACHING_NONE 0
+/**
+ * I915_CACHING_CACHED
+ *
+ * GPU access is coherent with cpu caches and furthermore the data is cached in
+ * last-level caches shared between cpu cores and the gpu GT. Default on
+ * machines with HAS_LLC.
+ */
+#define I915_CACHING_CACHED 1
+/**
+ * I915_CACHING_DISPLAY
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no special
+ * cache mode (like write-through or gfdt flushing) is available. The kernel
+ * automatically sets this mode when using a buffer as a scanout target.
+ * Userspace can manually set this mode to avoid a costly stall and clflush in
+ * the hotpath of drawing the first frame.
+ */
+#define I915_CACHING_DISPLAY 2
+
+struct drm_i915_gem_caching {
+ /**
+ * Handle of the buffer to set/get the caching level of. */
+ __u32 handle;
+
+ /**
+	 * Caching level to apply or the returned value
+ *
+ * bits0-15 are for generic caching control (i.e. the above defined
+ * values). bits16-31 are reserved for platform-specific variations
+ * (e.g. l3$ caching on gen7). */
+ __u32 caching;
+};
+
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17 6
+#define I915_BIT_6_SWIZZLE_9_10_17 7
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ __u32 handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ __u32 tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ __u32 stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ __u32 swizzle_mode;
+};
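
A sketch of requesting X tiling and observing what the kernel actually chose (fd, bo_handle and a stride suitable for the buffer are assumed):

	struct drm_i915_gem_set_tiling st = {
		.handle      = bo_handle,
		.tiling_mode = I915_TILING_X,
		.stride      = 4096,   /* bytes per row; must suit the tiling mode */
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
	/* st.tiling_mode may have been demoted to I915_TILING_NONE;
	 * st.swizzle_mode says how the CPU must address the pages via mmap. */
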
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ __u32 handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ __u32 tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ __u32 swizzle_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping whilst bound.
+ */
+ __u32 phys_swizzle_mode;
+};
+
+struct drm_i915_gem_get_aperture {
+ /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+ __u64 aper_size;
+
+ /**
+ * Available space in the aperture used by i915_gem_execbuffer, in
+ * bytes
+ */
+ __u64 aper_available_size;
+};
+
+struct drm_i915_get_pipe_from_crtc_id {
+ /** ID of CRTC being requested **/
+ __u32 crtc_id;
+
+ /** pipe of requested CRTC **/
+ __u32 pipe;
+};
+
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+#define __I915_MADV_PURGED 2 /* internal state */
+
+struct drm_i915_gem_madvise {
+ /** Handle of the buffer to change the backing store advice */
+ __u32 handle;
+
+ /* Advice: either the buffer will be needed again in the near future,
+	 * or won't be and could be discarded under memory pressure.
+ */
+ __u32 madv;
+
+ /** Whether the backing store still exists. */
+ __u32 retained;
+};
+
+/* flags */
+#define I915_OVERLAY_TYPE_MASK 0xff
+#define I915_OVERLAY_YUV_PLANAR 0x01
+#define I915_OVERLAY_YUV_PACKED 0x02
+#define I915_OVERLAY_RGB 0x03
+
+#define I915_OVERLAY_DEPTH_MASK 0xff00
+#define I915_OVERLAY_RGB24 0x1000
+#define I915_OVERLAY_RGB16 0x2000
+#define I915_OVERLAY_RGB15 0x3000
+#define I915_OVERLAY_YUV422 0x0100
+#define I915_OVERLAY_YUV411 0x0200
+#define I915_OVERLAY_YUV420 0x0300
+#define I915_OVERLAY_YUV410 0x0400
+
+#define I915_OVERLAY_SWAP_MASK 0xff0000
+#define I915_OVERLAY_NO_SWAP 0x000000
+#define I915_OVERLAY_UV_SWAP 0x010000
+#define I915_OVERLAY_Y_SWAP 0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
+
+#define I915_OVERLAY_FLAGS_MASK 0xff000000
+#define I915_OVERLAY_ENABLE 0x01000000
+
+struct drm_intel_overlay_put_image {
+ /* various flags and src format description */
+ __u32 flags;
+ /* source picture description */
+ __u32 bo_handle;
+ /* stride values and offsets are in bytes, buffer relative */
+ __u16 stride_Y; /* stride for packed formats */
+ __u16 stride_UV;
+	__u32 offset_Y; /* offset for packed formats */
+ __u32 offset_U;
+ __u32 offset_V;
+ /* in pixels */
+ __u16 src_width;
+ __u16 src_height;
+ /* to compensate the scaling factors for partially covered surfaces */
+ __u16 src_scan_width;
+ __u16 src_scan_height;
+ /* output crtc description */
+ __u32 crtc_id;
+ __u16 dst_x;
+ __u16 dst_y;
+ __u16 dst_width;
+ __u16 dst_height;
+};
+
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
+struct drm_intel_overlay_attrs {
+ __u32 flags;
+ __u32 color_key;
+ __s32 brightness;
+ __u32 contrast;
+ __u32 saturation;
+ __u32 gamma0;
+ __u32 gamma1;
+ __u32 gamma2;
+ __u32 gamma3;
+ __u32 gamma4;
+ __u32 gamma5;
+};
+
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple. Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent. All other pixels will
+ * be displayed on top of the primary plane. For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_DESTINATION (1<<1)
+#define I915_SET_COLORKEY_SOURCE (1<<2)
+struct drm_intel_sprite_colorkey {
+ __u32 plane_id;
+ __u32 min_value;
+ __u32 channel_mask;
+ __u32 max_value;
+ __u32 flags;
+};
+
+struct drm_i915_gem_wait {
+ /** Handle of BO we shall wait on */
+ __u32 bo_handle;
+ __u32 flags;
+	/** Number of nanoseconds to wait; returns the time remaining. */
+ __s64 timeout_ns;
+};
+
+struct drm_i915_gem_context_create {
+	/* output: id of new context */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct drm_i915_gem_context_destroy {
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct drm_i915_reg_read {
+ /*
+ * Register offset.
+ * For 64bit wide registers where the upper 32bits don't immediately
+ * follow the lower 32bits, the offset of the lower 32bits must
+	 * be specified.
+ */
+ __u64 offset;
+ __u64 val; /* Return value */
+};
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ *   single instruction 8byte read; to work around that, use
+ *   offset (0x2358 | 1) instead.
+ *
+ */
+
+struct drm_i915_reset_stats {
+ __u32 ctx_id;
+ __u32 flags;
+
+ /* All resets since boot/module reload, for all contexts */
+ __u32 reset_count;
+
+ /* Number of batches lost when active in GPU, for this context */
+ __u32 batch_active;
+
+ /* Number of batches lost pending for execution, for this context */
+ __u32 batch_pending;
+
+ __u32 pad;
+};
+
+struct drm_i915_gem_userptr {
+ __u64 user_ptr;
+ __u64 user_size;
+ __u32 flags;
+#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+};
+
+struct drm_i915_gem_context_param {
+ __u32 ctx_id;
+ __u32 size;
+ __u64 param;
+#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
+#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
+ __u64 value;
+};
+
+#endif /* _I915_DRM_H_ */
diff --git a/chromium/third_party/libdrm/src/include/drm/mach64_drm.h b/chromium/third_party/libdrm/src/include/drm/mach64_drm.h
new file mode 100644
index 00000000000..1f5fd84288e
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/mach64_drm.h
@@ -0,0 +1,256 @@
+/* mach64_drm.h -- Public header for the mach64 driver -*- linux-c -*-
+ * Created: Thu Nov 30 20:04:32 2000 by gareth@valinux.com
+ */
+/*
+ * Copyright 2000 Gareth Hughes
+ * Copyright 2002 Frank C. Earl
+ * Copyright 2002-2003 Leif Delgass
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Frank C. Earl <fearl@airmail.net>
+ * Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+#ifndef __MACH64_DRM_H__
+#define __MACH64_DRM_H__
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (mach64_sarea.h)
+ */
+#ifndef __MACH64_SAREA_DEFINES__
+#define __MACH64_SAREA_DEFINES__
+
+/* What needs to be changed for the current vertex buffer?
+ * GH: We're going to be pedantic about this. We want the card to do as
+ * little as possible, so let's avoid having it fetch a whole bunch of
+ * register values that don't change all that often, if at all.
+ */
+#define MACH64_UPLOAD_DST_OFF_PITCH 0x0001
+#define MACH64_UPLOAD_Z_OFF_PITCH 0x0002
+#define MACH64_UPLOAD_Z_ALPHA_CNTL 0x0004
+#define MACH64_UPLOAD_SCALE_3D_CNTL 0x0008
+#define MACH64_UPLOAD_DP_FOG_CLR 0x0010
+#define MACH64_UPLOAD_DP_WRITE_MASK 0x0020
+#define MACH64_UPLOAD_DP_PIX_WIDTH 0x0040
+#define MACH64_UPLOAD_SETUP_CNTL 0x0080
+#define MACH64_UPLOAD_MISC 0x0100
+#define MACH64_UPLOAD_TEXTURE 0x0200
+#define MACH64_UPLOAD_TEX0IMAGE 0x0400
+#define MACH64_UPLOAD_TEX1IMAGE 0x0800
+#define MACH64_UPLOAD_CLIPRECTS 0x1000 /* handled client-side */
+#define MACH64_UPLOAD_CONTEXT 0x00ff
+#define MACH64_UPLOAD_ALL 0x1fff
+
+/* DMA buffer size
+ */
+#define MACH64_BUFFER_SIZE 16384
+
+/* Max number of swaps allowed on the ring
+ * before the client must wait
+ */
+#define MACH64_MAX_QUEUED_FRAMES 3U
+
+/* Byte offsets for host blit buffer data
+ */
+#define MACH64_HOSTDATA_BLIT_OFFSET 104
+
+/* Keep these small for testing.
+ */
+#define MACH64_NR_SAREA_CLIPRECTS 8
+
+#define MACH64_CARD_HEAP 0
+#define MACH64_AGP_HEAP 1
+#define MACH64_NR_TEX_HEAPS 2
+#define MACH64_NR_TEX_REGIONS 64
+#define MACH64_LOG_TEX_GRANULARITY 16
+
+#define MACH64_TEX_MAXLEVELS 1
+
+#define MACH64_NR_CONTEXT_REGS 15
+#define MACH64_NR_TEXTURE_REGS 4
+
+#endif /* __MACH64_SAREA_DEFINES__ */
+
+typedef struct {
+ unsigned int dst_off_pitch;
+
+ unsigned int z_off_pitch;
+ unsigned int z_cntl;
+ unsigned int alpha_tst_cntl;
+
+ unsigned int scale_3d_cntl;
+
+ unsigned int sc_left_right;
+ unsigned int sc_top_bottom;
+
+ unsigned int dp_fog_clr;
+ unsigned int dp_write_mask;
+ unsigned int dp_pix_width;
+ unsigned int dp_mix;
+ unsigned int dp_src;
+
+ unsigned int clr_cmp_cntl;
+ unsigned int gui_traj_cntl;
+
+ unsigned int setup_cntl;
+
+ unsigned int tex_size_pitch;
+ unsigned int tex_cntl;
+ unsigned int secondary_tex_off;
+ unsigned int tex_offset;
+} drm_mach64_context_regs_t;
+
+typedef struct drm_mach64_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex dma buffer.
+ */
+ drm_mach64_context_regs_t context_state;
+ unsigned int dirty;
+ unsigned int vertsize;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int frames_queued;
+
+ /* Texture memory LRU.
+ */
+ struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
+ 1];
+ unsigned int tex_age[MACH64_NR_TEX_HEAPS];
+ int ctx_owner;
+} drm_mach64_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (mach64_common.h)
+ */
+
+/* Mach64 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+
+#define DRM_MACH64_INIT 0x00
+#define DRM_MACH64_IDLE 0x01
+#define DRM_MACH64_RESET 0x02
+#define DRM_MACH64_SWAP 0x03
+#define DRM_MACH64_CLEAR 0x04
+#define DRM_MACH64_VERTEX 0x05
+#define DRM_MACH64_BLIT 0x06
+#define DRM_MACH64_FLUSH 0x07
+#define DRM_MACH64_GETPARAM 0x08
+
+#define DRM_IOCTL_MACH64_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_INIT, drm_mach64_init_t)
+#define DRM_IOCTL_MACH64_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_IDLE )
+#define DRM_IOCTL_MACH64_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_RESET )
+#define DRM_IOCTL_MACH64_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_SWAP )
+#define DRM_IOCTL_MACH64_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_CLEAR, drm_mach64_clear_t)
+#define DRM_IOCTL_MACH64_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_VERTEX, drm_mach64_vertex_t)
+#define DRM_IOCTL_MACH64_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_BLIT, drm_mach64_blit_t)
+#define DRM_IOCTL_MACH64_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_FLUSH )
+#define DRM_IOCTL_MACH64_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t)
+
+/* Buffer flags for clears
+ */
+#define MACH64_FRONT 0x1
+#define MACH64_BACK 0x2
+#define MACH64_DEPTH 0x4
+
+/* Primitive types for vertex buffers
+ */
+#define MACH64_PRIM_POINTS 0x00000000
+#define MACH64_PRIM_LINES 0x00000001
+#define MACH64_PRIM_LINE_LOOP 0x00000002
+#define MACH64_PRIM_LINE_STRIP 0x00000003
+#define MACH64_PRIM_TRIANGLES 0x00000004
+#define MACH64_PRIM_TRIANGLE_STRIP 0x00000005
+#define MACH64_PRIM_TRIANGLE_FAN 0x00000006
+#define MACH64_PRIM_QUADS 0x00000007
+#define MACH64_PRIM_QUAD_STRIP 0x00000008
+#define MACH64_PRIM_POLYGON 0x00000009
+
+typedef enum _drm_mach64_dma_mode_t {
+ MACH64_MODE_DMA_ASYNC,
+ MACH64_MODE_DMA_SYNC,
+ MACH64_MODE_MMIO
+} drm_mach64_dma_mode_t;
+
+typedef struct drm_mach64_init {
+ enum {
+ DRM_MACH64_INIT_DMA = 0x01,
+ DRM_MACH64_CLEANUP_DMA = 0x02
+ } func;
+
+ unsigned long sarea_priv_offset;
+ int is_pci;
+ drm_mach64_dma_mode_t dma_mode;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long ring_offset;
+ unsigned long buffers_offset;
+ unsigned long agp_textures_offset;
+} drm_mach64_init_t;
+
+typedef struct drm_mach64_clear {
+ unsigned int flags;
+ int x, y, w, h;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+} drm_mach64_clear_t;
+
+typedef struct drm_mach64_vertex {
+ int prim;
+ void *buf; /* Address of vertex buffer */
+ unsigned long used; /* Number of bytes in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_mach64_vertex_t;
+
+typedef struct drm_mach64_blit {
+ void *buf;
+ int pitch;
+ int offset;
+ int format;
+ unsigned short x, y;
+ unsigned short width, height;
+} drm_mach64_blit_t;
+
+typedef struct drm_mach64_getparam {
+ enum {
+ MACH64_PARAM_FRAMES_QUEUED = 0x01,
+ MACH64_PARAM_IRQ_NR = 0x02
+ } param;
+ void *value;
+} drm_mach64_getparam_t;
+
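+/* A minimal usage sketch (illustrative, not part of the original header):
+ * query the driver's IRQ number via DRM_MACH64_GETPARAM using libdrm's
+ * drmCommandWriteRead(). Assumes an already-open DRM file descriptor;
+ * error handling is reduced to a single return code.
+ */
+#include <xf86drm.h>
+
+static int mach64_get_irq_nr(int fd)
+{
+	drm_mach64_getparam_t gp;
+	int irq = 0;
+
+	gp.param = MACH64_PARAM_IRQ_NR;
+	gp.value = &irq;	/* the kernel writes the result through this pointer */
+	if (drmCommandWriteRead(fd, DRM_MACH64_GETPARAM, &gp, sizeof(gp)) != 0)
+		return -1;
+	return irq;
+}
+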
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/mga_drm.h b/chromium/third_party/libdrm/src/include/drm/mga_drm.h
new file mode 100644
index 00000000000..b630e8fad54
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/mga_drm.h
@@ -0,0 +1,419 @@
+/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
+ * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * Rewritten by:
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __MGA_DRM_H__
+#define __MGA_DRM_H__
+
+#include "drm.h"
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (mga_sarea.h)
+ */
+
+#ifndef __MGA_SAREA_DEFINES__
+#define __MGA_SAREA_DEFINES__
+
+/* WARP pipe flags
+ */
+#define MGA_F 0x1 /* fog */
+#define MGA_A 0x2 /* alpha */
+#define MGA_S 0x4 /* specular */
+#define MGA_T2 0x8 /* multitexture */
+
+#define MGA_WARP_TGZ 0
+#define MGA_WARP_TGZF (MGA_F)
+#define MGA_WARP_TGZA (MGA_A)
+#define MGA_WARP_TGZAF (MGA_F|MGA_A)
+#define MGA_WARP_TGZS (MGA_S)
+#define MGA_WARP_TGZSF (MGA_S|MGA_F)
+#define MGA_WARP_TGZSA (MGA_S|MGA_A)
+#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
+#define MGA_WARP_T2GZ (MGA_T2)
+#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
+#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
+#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
+#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
+#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
+#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
+#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
+
+#define MGA_MAX_G200_PIPES 8 /* no multitex */
+#define MGA_MAX_G400_PIPES 16
+#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
+#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
+
+#define MGA_CARD_TYPE_G200 1
+#define MGA_CARD_TYPE_G400 2
+#define MGA_CARD_TYPE_G450 3 /* not currently used */
+#define MGA_CARD_TYPE_G550 4
+
+#define MGA_FRONT 0x1
+#define MGA_BACK 0x2
+#define MGA_DEPTH 0x4
+
+/* What needs to be changed for the current vertex dma buffer?
+ */
+#define MGA_UPLOAD_CONTEXT 0x1
+#define MGA_UPLOAD_TEX0 0x2
+#define MGA_UPLOAD_TEX1 0x4
+#define MGA_UPLOAD_PIPE 0x8
+#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
+#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
+#define MGA_UPLOAD_2D 0x40
+#define MGA_WAIT_AGE 0x80 /* handled client-side */
+#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
+#if 0
+#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
+ quiescent */
+#endif
+
+/* 128 buffers of 64k each, total 8 meg.
+ */
+#define MGA_BUFFER_SIZE (1 << 16)
+#define MGA_NUM_BUFFERS 128
+
+/* Keep these small for testing.
+ */
+#define MGA_NR_SAREA_CLIPRECTS 8
+
+/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
+ * regions, subject to a minimum region size of (1<<16) == 64k.
+ *
+ * Clients may subdivide regions internally, but when sharing between
+ * clients, the region size is the minimum granularity.
+ */
+
+#define MGA_CARD_HEAP 0
+#define MGA_AGP_HEAP 1
+#define MGA_NR_TEX_HEAPS 2
+#define MGA_NR_TEX_REGIONS 16
+#define MGA_LOG_MIN_TEX_REGION_SIZE 16
+
+#define DRM_MGA_IDLE_RETRY 2048
+
+#endif /* __MGA_SAREA_DEFINES__ */
+
+/* Setup registers for 3D context
+ */
+typedef struct {
+ unsigned int dstorg;
+ unsigned int maccess;
+ unsigned int plnwt;
+ unsigned int dwgctl;
+ unsigned int alphactrl;
+ unsigned int fogcolor;
+ unsigned int wflag;
+ unsigned int tdualstage0;
+ unsigned int tdualstage1;
+ unsigned int fcol;
+ unsigned int stencil;
+ unsigned int stencilctl;
+} drm_mga_context_regs_t;
+
+/* Setup registers for 2D, X server
+ */
+typedef struct {
+ unsigned int pitch;
+} drm_mga_server_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int texctl;
+ unsigned int texctl2;
+ unsigned int texfilter;
+ unsigned int texbordercol;
+ unsigned int texorg;
+ unsigned int texwidth;
+ unsigned int texheight;
+ unsigned int texorg1;
+ unsigned int texorg2;
+ unsigned int texorg3;
+ unsigned int texorg4;
+} drm_mga_texture_regs_t;
+
+/* General aging mechanism
+ */
+typedef struct {
+ unsigned int head; /* Position of head pointer */
+ unsigned int wrap; /* Primary DMA wrap count */
+} drm_mga_age_t;
+
+typedef struct _drm_mga_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex dma buffer.
+ */
+ drm_mga_context_regs_t context_state;
+ drm_mga_server_regs_t server_state;
+ drm_mga_texture_regs_t tex_state[2];
+ unsigned int warp_pipe;
+ unsigned int dirty;
+ unsigned int vertsize;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Information about the most recently used 3d drawable. The
+ * client fills in the req_* fields, the server fills in the
+ * exported_ fields and puts the cliprects into boxes, above.
+ *
+ * The client clears the exported_drawable field before
+ * clobbering the boxes data.
+ */
+ unsigned int req_drawable; /* the X drawable id */
+ unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
+
+ unsigned int exported_drawable;
+ unsigned int exported_index;
+ unsigned int exported_stamp;
+ unsigned int exported_buffers;
+ unsigned int exported_nfront;
+ unsigned int exported_nback;
+ int exported_back_x, exported_front_x, exported_w;
+ int exported_back_y, exported_front_y, exported_h;
+ struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
+
+ /* Counters for aging textures and for client-side throttling.
+ */
+ unsigned int status[4];
+ unsigned int last_wrap;
+
+ drm_mga_age_t last_frame;
+ unsigned int last_enqueue; /* last time a buffer was enqueued */
+ unsigned int last_dispatch; /* age of the most recently dispatched buffer */
+ unsigned int last_quiescent; /* */
+
+ /* LRU lists for texture memory in agp space and on the card.
+ */
+ struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
+ unsigned int texAge[MGA_NR_TEX_HEAPS];
+
+ /* Mechanism to validate card state.
+ */
+ int ctxOwner;
+} drm_mga_sarea_t;
+
+/* MGA specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_MGA_INIT 0x00
+#define DRM_MGA_FLUSH 0x01
+#define DRM_MGA_RESET 0x02
+#define DRM_MGA_SWAP 0x03
+#define DRM_MGA_CLEAR 0x04
+#define DRM_MGA_VERTEX 0x05
+#define DRM_MGA_INDICES 0x06
+#define DRM_MGA_ILOAD 0x07
+#define DRM_MGA_BLIT 0x08
+#define DRM_MGA_GETPARAM 0x09
+
+/* 3.2:
+ * ioctls for operating on fences.
+ */
+#define DRM_MGA_SET_FENCE 0x0a
+#define DRM_MGA_WAIT_FENCE 0x0b
+#define DRM_MGA_DMA_BOOTSTRAP 0x0c
+
+#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
+#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
+#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
+#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
+#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
+#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
+#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
+#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
+#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
+#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, __u32)
+#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, __u32)
+#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
+
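+/* Sketch (assumed usage, mirroring how a 3.2+ client might drive the fence
+ * ioctls through libdrm): insert a fence and block until it has passed.
+ * Assumes the kernel writes the fence value back on DRM_MGA_SET_FENCE.
+ */
+#include <xf86drm.h>
+
+static int mga_insert_and_wait_fence(int fd)
+{
+	__u32 fence = 0;
+
+	if (drmCommandWriteRead(fd, DRM_MGA_SET_FENCE, &fence, sizeof(fence)))
+		return -1;
+	return drmCommandWriteRead(fd, DRM_MGA_WAIT_FENCE, &fence,
+				   sizeof(fence));
+}
+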
+typedef struct _drm_mga_warp_index {
+ int installed;
+ unsigned long phys_addr;
+ int size;
+} drm_mga_warp_index_t;
+
+typedef struct drm_mga_init {
+ enum {
+ MGA_INIT_DMA = 0x01,
+ MGA_CLEANUP_DMA = 0x02
+ } func;
+
+ unsigned long sarea_priv_offset;
+
+ int chipset;
+ int sgram;
+
+ unsigned int maccess;
+
+ unsigned int fb_cpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long status_offset;
+ unsigned long warp_offset;
+ unsigned long primary_offset;
+ unsigned long buffers_offset;
+} drm_mga_init_t;
+
+typedef struct drm_mga_dma_bootstrap {
+ /**
+ * \name AGP texture region
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
+ * be filled in with the actual AGP texture settings.
+ *
+ * \warning
+ * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
+ * is zero, it means that PCI memory (most likely through the use of
+ * an IOMMU) is being used for "AGP" textures.
+ */
+ /*@{ */
+ unsigned long texture_handle; /**< Handle used to map AGP textures. */
+ __u32 texture_size; /**< Size of the AGP texture region. */
+ /*@} */
+
+ /**
+ * Requested size of the primary DMA region.
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+ * filled in with the size of the primary DMA region actually allocated.
+ */
+ __u32 primary_size;
+
+ /**
+ * Requested number of secondary DMA buffers.
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+ * filled in with the actual number of secondary DMA buffers
+ * allocated. Particularly when PCI DMA is used, this may be
+ * (substantially) less than the number requested.
+ */
+ __u32 secondary_bin_count;
+
+ /**
+ * Requested size of each secondary DMA buffer.
+ *
+ * While the kernel \b is free to reduce
+ * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
+ * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
+ */
+ __u32 secondary_bin_size;
+
+ /**
+ * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
+ * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
+ * zero, it means that PCI DMA should be used, even if AGP is
+ * possible.
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+ * filled in with the actual AGP mode. If AGP was not available
+ * (i.e., PCI DMA was used), this value will be zero.
+ */
+ __u32 agp_mode;
+
+ /**
+ * Desired AGP GART size, measured in megabytes.
+ */
+ __u8 agp_size;
+} drm_mga_dma_bootstrap_t;
+
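+/* A hedged bootstrap sketch (not from the header): request 4x AGP and a
+ * set of secondary buffers, letting the kernel negotiate everything down.
+ * The AGPSTAT2_4X value of 4 and all sizes below are illustrative
+ * assumptions. On success the struct holds the values actually granted.
+ */
+#include <string.h>
+#include <xf86drm.h>
+
+static int mga_bootstrap_dma(int fd, drm_mga_dma_bootstrap_t *dma_bs)
+{
+	memset(dma_bs, 0, sizeof(*dma_bs));
+	dma_bs->primary_size = 1024 * 1024;	/* requested; kernel may shrink */
+	dma_bs->secondary_bin_count = 48;
+	dma_bs->secondary_bin_size = 64 * 1024;	/* kernel may not shrink this */
+	dma_bs->agp_mode = 4;			/* AGPSTAT2_4X; 0 forces PCI DMA */
+	dma_bs->agp_size = 64;			/* desired GART size, in MB */
+
+	return drmCommandWriteRead(fd, DRM_MGA_DMA_BOOTSTRAP, dma_bs,
+				   sizeof(*dma_bs));
+}
+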
+typedef struct drm_mga_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask;
+} drm_mga_clear_t;
+
+typedef struct drm_mga_vertex {
+ int idx; /* buffer to queue */
+ int used; /* bytes in use */
+ int discard; /* client finished with buffer? */
+} drm_mga_vertex_t;
+
+typedef struct drm_mga_indices {
+ int idx; /* buffer to queue */
+ unsigned int start;
+ unsigned int end;
+ int discard; /* client finished with buffer? */
+} drm_mga_indices_t;
+
+typedef struct drm_mga_iload {
+ int idx;
+ unsigned int dstorg;
+ unsigned int length;
+} drm_mga_iload_t;
+
+typedef struct _drm_mga_blit {
+ unsigned int planemask;
+ unsigned int srcorg;
+ unsigned int dstorg;
+ int src_pitch, dst_pitch;
+ int delta_sx, delta_sy;
+ int delta_dx, delta_dy;
+ int height, ydir; /* flip image vertically */
+ int source_pitch, dest_pitch;
+} drm_mga_blit_t;
+
+/* 3.1: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define MGA_PARAM_IRQ_NR 1
+
+/* 3.2: Query the actual card type. The DDX only distinguishes between
+ * G200 chips and non-G200 chips, which it calls G400. It turns out that
+ * there are some very subtle differences between the G4x0 chips and the G550
+ * chips. Using this parameter query, a client-side driver can detect the
+ * difference between a G4x0 and a G550.
+ */
+#define MGA_PARAM_CARD_TYPE 2
+
+typedef struct drm_mga_getparam {
+ int param;
+ void *value;
+} drm_mga_getparam_t;
+
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/nouveau_drm.h b/chromium/third_party/libdrm/src/include/drm/nouveau_drm.h
new file mode 100644
index 00000000000..e418f9f3899
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/nouveau_drm.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRM_H__
+#define __NOUVEAU_DRM_H__
+
+#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
+
+struct drm_nouveau_channel_alloc {
+ uint32_t fb_ctxdma_handle;
+ uint32_t tt_ctxdma_handle;
+
+ int channel;
+ uint32_t pushbuf_domains;
+
+ /* Notifier memory */
+ uint32_t notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ uint32_t handle;
+ uint32_t grclass;
+ } subchan[8];
+ uint32_t nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ int channel;
+};
+
+struct drm_nouveau_grobj_alloc {
+ int channel;
+ uint32_t handle;
+ int class;
+};
+
+struct drm_nouveau_notifierobj_alloc {
+ uint32_t channel;
+ uint32_t handle;
+ uint32_t size;
+ uint32_t offset;
+};
+
+struct drm_nouveau_gpuobj_free {
+ int channel;
+ uint32_t handle;
+};
+
+/* FIXME : maybe unify {GET,SET}PARAMs */
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
+#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
+#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+struct drm_nouveau_getparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+struct drm_nouveau_setparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
+#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
+#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
+#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
+
+#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
+#define NOUVEAU_GEM_TILE_16BPP 0x00000001
+#define NOUVEAU_GEM_TILE_32BPP 0x00000002
+#define NOUVEAU_GEM_TILE_ZETA 0x00000004
+#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
+
+struct drm_nouveau_gem_info {
+ uint32_t handle;
+ uint32_t domain;
+ uint64_t size;
+ uint64_t offset;
+ uint64_t map_handle;
+ uint32_t tile_mode;
+ uint32_t tile_flags;
+};
+
+struct drm_nouveau_gem_new {
+ struct drm_nouveau_gem_info info;
+ uint32_t channel_hint;
+ uint32_t align;
+};
+
+#define NOUVEAU_GEM_MAX_BUFFERS 1024
+struct drm_nouveau_gem_pushbuf_bo_presumed {
+ uint32_t valid;
+ uint32_t domain;
+ uint64_t offset;
+};
+
+struct drm_nouveau_gem_pushbuf_bo {
+ uint64_t user_priv;
+ uint32_t handle;
+ uint32_t read_domains;
+ uint32_t write_domains;
+ uint32_t valid_domains;
+ struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
+};
+
+#define NOUVEAU_GEM_RELOC_LOW (1 << 0)
+#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
+#define NOUVEAU_GEM_RELOC_OR (1 << 2)
+#define NOUVEAU_GEM_MAX_RELOCS 1024
+struct drm_nouveau_gem_pushbuf_reloc {
+ uint32_t reloc_bo_index;
+ uint32_t reloc_bo_offset;
+ uint32_t bo_index;
+ uint32_t flags;
+ uint32_t data;
+ uint32_t vor;
+ uint32_t tor;
+};
+
+#define NOUVEAU_GEM_MAX_PUSH 512
+struct drm_nouveau_gem_pushbuf_push {
+ uint32_t bo_index;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t length;
+};
+
+struct drm_nouveau_gem_pushbuf {
+ uint32_t channel;
+ uint32_t nr_buffers;
+ uint64_t buffers;
+ uint32_t nr_relocs;
+ uint32_t nr_push;
+ uint64_t relocs;
+ uint64_t push;
+ uint32_t suffix0;
+ uint32_t suffix1;
+ uint64_t vram_available;
+ uint64_t gart_available;
+};
+
+#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
+#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
+#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
+struct drm_nouveau_gem_cpu_prep {
+ uint32_t handle;
+ uint32_t flags;
+};
+
+struct drm_nouveau_gem_cpu_fini {
+ uint32_t handle;
+};
+
+enum nouveau_bus_type {
+ NV_AGP = 0,
+ NV_PCI = 1,
+ NV_PCIE = 2,
+};
+
+struct drm_nouveau_sarea {
+};
+
+#define DRM_NOUVEAU_GETPARAM 0x00
+#define DRM_NOUVEAU_SETPARAM 0x01
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03
+#define DRM_NOUVEAU_GROBJ_ALLOC 0x04
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05
+#define DRM_NOUVEAU_GPUOBJ_FREE 0x06
+#define DRM_NOUVEAU_NVIF 0x07
+#define DRM_NOUVEAU_GEM_NEW 0x40
+#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
+#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
+#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
+#define DRM_NOUVEAU_GEM_INFO 0x44
+
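+/* Usage sketch (illustrative): read the chipset id with the GETPARAM
+ * ioctl via libdrm's drmCommandWriteRead(). Assumes an open nouveau fd.
+ */
+#include <stdint.h>
+#include <xf86drm.h>
+
+static int nouveau_chipset_id(int fd, uint64_t *chipset)
+{
+	struct drm_nouveau_getparam gp;
+
+	gp.param = NOUVEAU_GETPARAM_CHIPSET_ID;
+	gp.value = 0;
+	if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)))
+		return -1;
+	*chipset = gp.value;
+	return 0;
+}
+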
+#endif /* __NOUVEAU_DRM_H__ */
diff --git a/chromium/third_party/libdrm/src/include/drm/qxl_drm.h b/chromium/third_party/libdrm/src/include/drm/qxl_drm.h
new file mode 100644
index 00000000000..1e331a867b5
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/qxl_drm.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef QXL_DRM_H
+#define QXL_DRM_H
+
+#include <stddef.h>
+#include "drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers; use uint64_t instead for 32 bit / 64 bit user/kernel
+ * compatibility. Keep fields aligned to their size.
+ */
+
+#define QXL_GEM_DOMAIN_CPU 0
+#define QXL_GEM_DOMAIN_VRAM 1
+#define QXL_GEM_DOMAIN_SURFACE 2
+
+#define DRM_QXL_ALLOC 0x00
+#define DRM_QXL_MAP 0x01
+#define DRM_QXL_EXECBUFFER 0x02
+#define DRM_QXL_UPDATE_AREA 0x03
+#define DRM_QXL_GETPARAM 0x04
+#define DRM_QXL_CLIENTCAP 0x05
+
+#define DRM_QXL_ALLOC_SURF 0x06
+
+struct drm_qxl_alloc {
+ uint32_t size;
+ uint32_t handle; /* 0 is an invalid handle */
+};
+
+struct drm_qxl_map {
+ uint64_t offset; /* use for mmap system call */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+/*
+ * dest is the bo we are writing the relocation into.
+ * src is the bo we are relocating.
+ * *(dest_handle.base_addr + dest_offset) = physical_address(src_handle.addr +
+ * src_offset)
+ */
+#define QXL_RELOC_TYPE_BO 1
+#define QXL_RELOC_TYPE_SURF 2
+
+struct drm_qxl_reloc {
+ uint64_t src_offset; /* offset into src_handle or src buffer */
+ uint64_t dst_offset; /* offset in dest handle */
+ uint32_t src_handle; /* source handle to compute address from */
+ uint32_t dst_handle; /* 0 if to command buffer */
+ uint32_t reloc_type;
+ uint32_t pad;
+};
+
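+/* Illustrative helper (an assumption, not part of the interface): fill a
+ * relocation that patches the physical address of `src_handle' into the
+ * command buffer at byte offset `cmd_off', following the formula above.
+ */
+static void qxl_reloc_into_cmdbuf(struct drm_qxl_reloc *r,
+				  uint32_t src_handle, uint64_t src_offset,
+				  uint64_t cmd_off)
+{
+	r->reloc_type = QXL_RELOC_TYPE_BO;
+	r->src_handle = src_handle;
+	r->src_offset = src_offset;
+	r->dst_handle = 0;		/* 0 targets the command buffer itself */
+	r->dst_offset = cmd_off;
+	r->pad = 0;
+}
+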
+struct drm_qxl_command {
+ uint64_t command; /* void* */
+ uint64_t relocs; /* struct drm_qxl_reloc* */
+ uint32_t type;
+ uint32_t command_size;
+ uint32_t relocs_num;
+ uint32_t pad;
+};
+
+/* XXX: call it drm_qxl_commands? */
+struct drm_qxl_execbuffer {
+ uint32_t flags; /* for future use */
+ uint32_t commands_num;
+ uint64_t commands; /* struct drm_qxl_command* */
+};
+
+struct drm_qxl_update_area {
+ uint32_t handle;
+ uint32_t top;
+ uint32_t left;
+ uint32_t bottom;
+ uint32_t right;
+ uint32_t pad;
+};
+
+#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
+#define QXL_PARAM_MAX_RELOCS 2
+struct drm_qxl_getparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+/* these are one-bit values */
+struct drm_qxl_clientcap {
+ uint32_t index;
+ uint32_t pad;
+};
+
+struct drm_qxl_alloc_surf {
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
+ int32_t stride;
+ uint32_t handle;
+ uint32_t pad;
+};
+
+#define DRM_IOCTL_QXL_ALLOC \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC, struct drm_qxl_alloc)
+
+#define DRM_IOCTL_QXL_MAP \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_MAP, struct drm_qxl_map)
+
+#define DRM_IOCTL_QXL_EXECBUFFER \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_EXECBUFFER,\
+ struct drm_qxl_execbuffer)
+
+#define DRM_IOCTL_QXL_UPDATE_AREA \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_UPDATE_AREA,\
+ struct drm_qxl_update_area)
+
+#define DRM_IOCTL_QXL_GETPARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_GETPARAM,\
+ struct drm_qxl_getparam)
+
+#define DRM_IOCTL_QXL_CLIENTCAP \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_CLIENTCAP,\
+ struct drm_qxl_clientcap)
+
+#define DRM_IOCTL_QXL_ALLOC_SURF \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
+ struct drm_qxl_alloc_surf)
+
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/r128_drm.h b/chromium/third_party/libdrm/src/include/drm/r128_drm.h
new file mode 100644
index 00000000000..ede78ff9d41
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/r128_drm.h
@@ -0,0 +1,326 @@
+/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
+ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
+ */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Kevin E. Martin <martin@valinux.com>
+ */
+
+#ifndef __R128_DRM_H__
+#define __R128_DRM_H__
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (r128_sarea.h)
+ */
+#ifndef __R128_SAREA_DEFINES__
+#define __R128_SAREA_DEFINES__
+
+/* What needs to be changed for the current vertex buffer?
+ */
+#define R128_UPLOAD_CONTEXT 0x001
+#define R128_UPLOAD_SETUP 0x002
+#define R128_UPLOAD_TEX0 0x004
+#define R128_UPLOAD_TEX1 0x008
+#define R128_UPLOAD_TEX0IMAGES 0x010
+#define R128_UPLOAD_TEX1IMAGES 0x020
+#define R128_UPLOAD_CORE 0x040
+#define R128_UPLOAD_MASKS 0x080
+#define R128_UPLOAD_WINDOW 0x100
+#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
+#define R128_REQUIRE_QUIESCENCE 0x400
+#define R128_UPLOAD_ALL 0x7ff
+
+#define R128_FRONT 0x1
+#define R128_BACK 0x2
+#define R128_DEPTH 0x4
+
+/* Primitive types
+ */
+#define R128_POINTS 0x1
+#define R128_LINES 0x2
+#define R128_LINE_STRIP 0x3
+#define R128_TRIANGLES 0x4
+#define R128_TRIANGLE_FAN 0x5
+#define R128_TRIANGLE_STRIP 0x6
+
+/* Vertex/indirect buffer size
+ */
+#define R128_BUFFER_SIZE 16384
+
+/* Byte offsets for indirect buffer data
+ */
+#define R128_INDEX_PRIM_OFFSET 20
+#define R128_HOSTDATA_BLIT_OFFSET 32
+
+/* Keep these small for testing.
+ */
+#define R128_NR_SAREA_CLIPRECTS 12
+
+/* There are 2 heaps (local/AGP). Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define R128_LOCAL_TEX_HEAP 0
+#define R128_AGP_TEX_HEAP 1
+#define R128_NR_TEX_HEAPS 2
+#define R128_NR_TEX_REGIONS 64
+#define R128_LOG_TEX_GRANULARITY 16
+
+#define R128_NR_CONTEXT_REGS 12
+
+#define R128_MAX_TEXTURE_LEVELS 11
+#define R128_MAX_TEXTURE_UNITS 2
+
+#endif /* __R128_SAREA_DEFINES__ */
+
+typedef struct {
+ /* Context state - can be written in one large chunk */
+ unsigned int dst_pitch_offset_c;
+ unsigned int dp_gui_master_cntl_c;
+ unsigned int sc_top_left_c;
+ unsigned int sc_bottom_right_c;
+ unsigned int z_offset_c;
+ unsigned int z_pitch_c;
+ unsigned int z_sten_cntl_c;
+ unsigned int tex_cntl_c;
+ unsigned int misc_3d_state_cntl_reg;
+ unsigned int texture_clr_cmp_clr_c;
+ unsigned int texture_clr_cmp_msk_c;
+ unsigned int fog_color_c;
+
+ /* Texture state */
+ unsigned int tex_size_pitch_c;
+ unsigned int constant_color_c;
+
+ /* Setup state */
+ unsigned int pm4_vc_fpu_setup;
+ unsigned int setup_cntl;
+
+ /* Mask state */
+ unsigned int dp_write_mask;
+ unsigned int sten_ref_mask_c;
+ unsigned int plane_3d_mask_c;
+
+ /* Window state */
+ unsigned int window_xy_offset;
+
+ /* Core state */
+ unsigned int scale_3d_cntl;
+} drm_r128_context_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int tex_cntl;
+ unsigned int tex_combine_cntl;
+ unsigned int tex_size_pitch;
+ unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
+ unsigned int tex_border_color;
+} drm_r128_texture_regs_t;
+
+typedef struct drm_r128_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex buffer.
+ */
+ drm_r128_context_regs_t context_state;
+ drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
+ unsigned int dirty;
+ unsigned int vertsize;
+ unsigned int vc_format;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int last_frame;
+ unsigned int last_dispatch;
+
+ struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
+ unsigned int tex_age[R128_NR_TEX_HEAPS];
+ int ctx_owner;
+ int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
+ int pfCurrentPage; /* which buffer is being displayed? */
+} drm_r128_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmR128.h)
+ */
+
+/* Rage 128 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_R128_INIT 0x00
+#define DRM_R128_CCE_START 0x01
+#define DRM_R128_CCE_STOP 0x02
+#define DRM_R128_CCE_RESET 0x03
+#define DRM_R128_CCE_IDLE 0x04
+/* 0x05 not used */
+#define DRM_R128_RESET 0x06
+#define DRM_R128_SWAP 0x07
+#define DRM_R128_CLEAR 0x08
+#define DRM_R128_VERTEX 0x09
+#define DRM_R128_INDICES 0x0a
+#define DRM_R128_BLIT 0x0b
+#define DRM_R128_DEPTH 0x0c
+#define DRM_R128_STIPPLE 0x0d
+/* 0x0e not used */
+#define DRM_R128_INDIRECT 0x0f
+#define DRM_R128_FULLSCREEN 0x10
+#define DRM_R128_CLEAR2 0x11
+#define DRM_R128_GETPARAM 0x12
+#define DRM_R128_FLIP 0x13
+
+#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
+#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
+#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
+#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
+#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
+/* 0x05 not used */
+#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
+#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
+#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
+#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
+#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
+#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
+#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
+/* 0x0e not used */
+#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
+#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
+#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
+#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
+#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
+
+typedef struct drm_r128_init {
+ enum {
+ R128_INIT_CCE = 0x01,
+ R128_CLEANUP_CCE = 0x02
+ } func;
+ unsigned long sarea_priv_offset;
+ int is_pci;
+ int cce_mode;
+ int cce_secure;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+ unsigned int span_offset;
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long agp_textures_offset;
+} drm_r128_init_t;
+
+typedef struct drm_r128_cce_stop {
+ int flush;
+ int idle;
+} drm_r128_cce_stop_t;
+
+typedef struct drm_r128_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask;
+} drm_r128_clear_t;
+
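+/* Sketch (illustrative values): clear front, back and depth buffers via
+ * DRM_R128_CLEAR through libdrm's drmCommandWrite(). Assumes an open fd
+ * and that the caller holds the hardware lock.
+ */
+#include <string.h>
+#include <xf86drm.h>
+
+static int r128_clear_buffers(int fd)
+{
+	drm_r128_clear_t clear;
+
+	memset(&clear, 0, sizeof(clear));
+	clear.flags = R128_FRONT | R128_BACK | R128_DEPTH;
+	clear.clear_color = 0x00000000;	/* black */
+	clear.clear_depth = 0x00ffffff;	/* max depth for a 24-bit buffer */
+	clear.color_mask = 0xffffffff;
+	clear.depth_mask = 0xffffffff;
+	return drmCommandWrite(fd, DRM_R128_CLEAR, &clear, sizeof(clear));
+}
+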
+typedef struct drm_r128_vertex {
+ int prim;
+ int idx; /* Index of vertex buffer */
+ int count; /* Number of vertices in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_r128_vertex_t;
+
+typedef struct drm_r128_indices {
+ int prim;
+ int idx;
+ int start;
+ int end;
+ int discard; /* Client finished with buffer? */
+} drm_r128_indices_t;
+
+typedef struct drm_r128_blit {
+ int idx;
+ int pitch;
+ int offset;
+ int format;
+ unsigned short x, y;
+ unsigned short width, height;
+} drm_r128_blit_t;
+
+typedef struct drm_r128_depth {
+ enum {
+ R128_WRITE_SPAN = 0x01,
+ R128_WRITE_PIXELS = 0x02,
+ R128_READ_SPAN = 0x03,
+ R128_READ_PIXELS = 0x04
+ } func;
+ int n;
+ int *x;
+ int *y;
+ unsigned int *buffer;
+ unsigned char *mask;
+} drm_r128_depth_t;
+
+typedef struct drm_r128_stipple {
+ unsigned int *mask;
+} drm_r128_stipple_t;
+
+typedef struct drm_r128_indirect {
+ int idx;
+ int start;
+ int end;
+ int discard;
+} drm_r128_indirect_t;
+
+typedef struct drm_r128_fullscreen {
+ enum {
+ R128_INIT_FULLSCREEN = 0x01,
+ R128_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_r128_fullscreen_t;
+
+/* 2.3: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define R128_PARAM_IRQ_NR 1
+
+typedef struct drm_r128_getparam {
+ int param;
+ void *value;
+} drm_r128_getparam_t;
+
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/radeon_drm.h b/chromium/third_party/libdrm/src/include/drm/radeon_drm.h
new file mode 100644
index 00000000000..cd31794f63b
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/radeon_drm.h
@@ -0,0 +1,1046 @@
+/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef __RADEON_DRM_H__
+#define __RADEON_DRM_H__
+
+#include "drm.h"
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (radeon_sarea.h)
+ */
+#ifndef __RADEON_SAREA_DEFINES__
+#define __RADEON_SAREA_DEFINES__
+
+/* Old style state flags, required for sarea interface (1.1 and 1.2
+ * clears) and 1.2 drm_vertex2 ioctl.
+ */
+#define RADEON_UPLOAD_CONTEXT 0x00000001
+#define RADEON_UPLOAD_VERTFMT 0x00000002
+#define RADEON_UPLOAD_LINE 0x00000004
+#define RADEON_UPLOAD_BUMPMAP 0x00000008
+#define RADEON_UPLOAD_MASKS 0x00000010
+#define RADEON_UPLOAD_VIEWPORT 0x00000020
+#define RADEON_UPLOAD_SETUP 0x00000040
+#define RADEON_UPLOAD_TCL 0x00000080
+#define RADEON_UPLOAD_MISC 0x00000100
+#define RADEON_UPLOAD_TEX0 0x00000200
+#define RADEON_UPLOAD_TEX1 0x00000400
+#define RADEON_UPLOAD_TEX2 0x00000800
+#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
+#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
+#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
+#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
+#define RADEON_REQUIRE_QUIESCENCE 0x00010000
+#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */
+#define RADEON_UPLOAD_ALL 0x003effff
+#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
+
+/* New style per-packet identifiers for use in cmd_buffer ioctl with
+ * the RADEON_EMIT_PACKET command. Comments relate new packets to old
+ * state bits and the packet size:
+ */
+#define RADEON_EMIT_PP_MISC 0 /* context/7 */
+#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
+#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
+#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
+#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
+#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
+#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
+#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
+#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
+#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
+#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
+#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
+#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
+#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
+#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
+#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
+#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
+#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
+#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
+#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
+#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
+#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
+#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
+#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
+#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
+#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
+#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
+#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
+#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
+#define R200_EMIT_VAP_CTL 32 /* vap/1 */
+#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
+#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
+#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
+#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
+#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
+#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
+#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
+#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
+#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
+#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
+#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
+#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
+#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
+#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
+#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
+#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
+#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
+#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
+#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
+#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
+#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
+#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
+#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
+#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
+#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
+#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
+#define R200_EMIT_PP_CUBIC_FACES_0 61
+#define R200_EMIT_PP_CUBIC_OFFSETS_0 62
+#define R200_EMIT_PP_CUBIC_FACES_1 63
+#define R200_EMIT_PP_CUBIC_OFFSETS_1 64
+#define R200_EMIT_PP_CUBIC_FACES_2 65
+#define R200_EMIT_PP_CUBIC_OFFSETS_2 66
+#define R200_EMIT_PP_CUBIC_FACES_3 67
+#define R200_EMIT_PP_CUBIC_OFFSETS_3 68
+#define R200_EMIT_PP_CUBIC_FACES_4 69
+#define R200_EMIT_PP_CUBIC_OFFSETS_4 70
+#define R200_EMIT_PP_CUBIC_FACES_5 71
+#define R200_EMIT_PP_CUBIC_OFFSETS_5 72
+#define RADEON_EMIT_PP_TEX_SIZE_0 73
+#define RADEON_EMIT_PP_TEX_SIZE_1 74
+#define RADEON_EMIT_PP_TEX_SIZE_2 75
+#define R200_EMIT_RB3D_BLENDCOLOR 76
+#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77
+#define RADEON_EMIT_PP_CUBIC_FACES_0 78
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79
+#define RADEON_EMIT_PP_CUBIC_FACES_1 80
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81
+#define RADEON_EMIT_PP_CUBIC_FACES_2 82
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83
+#define R200_EMIT_PP_TRI_PERF_CNTL 84
+#define R200_EMIT_PP_AFS_0 85
+#define R200_EMIT_PP_AFS_1 86
+#define R200_EMIT_ATF_TFACTOR 87
+#define R200_EMIT_PP_TXCTLALL_0 88
+#define R200_EMIT_PP_TXCTLALL_1 89
+#define R200_EMIT_PP_TXCTLALL_2 90
+#define R200_EMIT_PP_TXCTLALL_3 91
+#define R200_EMIT_PP_TXCTLALL_4 92
+#define R200_EMIT_PP_TXCTLALL_5 93
+#define R200_EMIT_VAP_PVS_CNTL 94
+#define RADEON_MAX_STATE_PACKETS 95
+
+/* Commands understood by cmd_buffer ioctl. More can be added but
+ * obviously these can't be removed or changed:
+ */
+#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */
+#define RADEON_CMD_SCALARS 2 /* emit scalar data */
+#define RADEON_CMD_VECTORS 3 /* emit vector data */
+#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
+#define RADEON_CMD_PACKET3 5 /* emit hw packet */
+#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
+#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
+#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
+ * doesn't make the cpu wait, just
+ * the graphics hardware */
+#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */
+
+typedef union {
+ int i;
+ struct {
+ unsigned char cmd_type, pad0, pad1, pad2;
+ } header;
+ struct {
+ unsigned char cmd_type, packet_id, pad0, pad1;
+ } packet;
+ struct {
+ unsigned char cmd_type, offset, stride, count;
+ } scalars;
+ struct {
+ unsigned char cmd_type, offset, stride, count;
+ } vectors;
+ struct {
+ unsigned char cmd_type, addr_lo, addr_hi, count;
+ } veclinear;
+ struct {
+ unsigned char cmd_type, buf_idx, pad0, pad1;
+ } dma;
+ struct {
+ unsigned char cmd_type, flags, pad0, pad1;
+ } wait;
+} drm_radeon_cmd_header_t;
+
+#define RADEON_WAIT_2D 0x1
+#define RADEON_WAIT_3D 0x2
+
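+/* Sketch (illustrative): append a RADEON_CMD_WAIT header to a cmd_buffer
+ * stream. The wait stalls the graphics hardware, not the CPU. Assumes
+ * `buf' has at least sizeof(drm_radeon_cmd_header_t) bytes of room.
+ */
+#include <string.h>
+
+static char *radeon_emit_wait(char *buf)
+{
+	drm_radeon_cmd_header_t h;
+
+	h.i = 0;
+	h.wait.cmd_type = RADEON_CMD_WAIT;
+	h.wait.flags = RADEON_WAIT_2D | RADEON_WAIT_3D;
+	memcpy(buf, &h, sizeof(h));
+	return buf + sizeof(h);
+}
+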
+/* Allowed parameters for R300_CMD_PACKET3
+ */
+#define R300_CMD_PACKET3_CLEAR 0
+#define R300_CMD_PACKET3_RAW 1
+
+/* Commands understood by cmd_buffer ioctl for R300.
+ * The interface has not been stabilized, so some of these may be removed
+ * and eventually reordered before stabilization.
+ */
+#define R300_CMD_PACKET0 1
+#define R300_CMD_VPU 2 /* emit vertex program upload */
+#define R300_CMD_PACKET3 3 /* emit a packet3 */
+#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
+#define R300_CMD_CP_DELAY 5
+#define R300_CMD_DMA_DISCARD 6
+#define R300_CMD_WAIT 7
+# define R300_WAIT_2D 0x1
+# define R300_WAIT_3D 0x2
+/* These two defines are DOING IT WRONG - however
+ * we have userspace which relies on using these.
+ * The wait interface is backwards compatible; new
+ * code should use the NEW_WAIT defines below.
+ * THESE ARE NOT BIT FIELDS
+ */
+# define R300_WAIT_2D_CLEAN 0x3
+# define R300_WAIT_3D_CLEAN 0x4
+
+# define R300_NEW_WAIT_2D_3D 0x3
+# define R300_NEW_WAIT_2D_2D_CLEAN 0x4
+# define R300_NEW_WAIT_3D_3D_CLEAN 0x6
+# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8
+
+#define R300_CMD_SCRATCH 8
+#define R300_CMD_R500FP 9
+
+typedef union {
+ unsigned int u;
+ struct {
+ unsigned char cmd_type, pad0, pad1, pad2;
+ } header;
+ struct {
+ unsigned char cmd_type, count, reglo, reghi;
+ } packet0;
+ struct {
+ unsigned char cmd_type, count, adrlo, adrhi;
+ } vpu;
+ struct {
+ unsigned char cmd_type, packet, pad0, pad1;
+ } packet3;
+ struct {
+ unsigned char cmd_type, packet;
+ unsigned short count; /* amount of packet2 to emit */
+ } delay;
+ struct {
+ unsigned char cmd_type, buf_idx, pad0, pad1;
+ } dma;
+ struct {
+ unsigned char cmd_type, flags, pad0, pad1;
+ } wait;
+ struct {
+ unsigned char cmd_type, reg, n_bufs, flags;
+ } scratch;
+ struct {
+ unsigned char cmd_type, count, adrlo, adrhi_flags;
+ } r500fp;
+} drm_r300_cmd_header_t;
+
+#define RADEON_FRONT 0x1
+#define RADEON_BACK 0x2
+#define RADEON_DEPTH 0x4
+#define RADEON_STENCIL 0x8
+#define RADEON_CLEAR_FASTZ 0x80000000
+#define RADEON_USE_HIERZ 0x40000000
+#define RADEON_USE_COMP_ZBUF 0x20000000
+
+#define R500FP_CONSTANT_TYPE (1 << 1)
+#define R500FP_CONSTANT_CLAMP (1 << 2)
+
+/* Primitive types
+ */
+#define RADEON_POINTS 0x1
+#define RADEON_LINES 0x2
+#define RADEON_LINE_STRIP 0x3
+#define RADEON_TRIANGLES 0x4
+#define RADEON_TRIANGLE_FAN 0x5
+#define RADEON_TRIANGLE_STRIP 0x6
+
+/* Vertex/indirect buffer size
+ */
+#define RADEON_BUFFER_SIZE 65536
+
+/* Byte offsets for indirect buffer data
+ */
+#define RADEON_INDEX_PRIM_OFFSET 20
+
+#define RADEON_SCRATCH_REG_OFFSET 32
+
+#define R600_SCRATCH_REG_OFFSET 256
+
+#define RADEON_NR_SAREA_CLIPRECTS 12
+
+/* There are 2 heaps (local/GART). Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define RADEON_LOCAL_TEX_HEAP 0
+#define RADEON_GART_TEX_HEAP 1
+#define RADEON_NR_TEX_HEAPS 2
+#define RADEON_NR_TEX_REGIONS 64
+#define RADEON_LOG_TEX_GRANULARITY 16
+
+#define RADEON_MAX_TEXTURE_LEVELS 12
+#define RADEON_MAX_TEXTURE_UNITS 3
+
+#define RADEON_MAX_SURFACES 8
+
+/* Blits have strict offset rules. All blit offsets must be aligned on
+ * a 1K-byte boundary.
+ */
+#define RADEON_OFFSET_SHIFT 10
+#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
+#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
+
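+/* A small helper sketch derived from the rule above: round a byte offset
+ * up to the required 1K blit alignment.
+ */
+static inline unsigned int radeon_align_blit_offset(unsigned int offset)
+{
+	return (offset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
+}
+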
+#endif /* __RADEON_SAREA_DEFINES__ */
+
+typedef struct {
+ unsigned int red;
+ unsigned int green;
+ unsigned int blue;
+ unsigned int alpha;
+} radeon_color_regs_t;
+
+typedef struct {
+ /* Context state */
+ unsigned int pp_misc; /* 0x1c14 */
+ unsigned int pp_fog_color;
+ unsigned int re_solid_color;
+ unsigned int rb3d_blendcntl;
+ unsigned int rb3d_depthoffset;
+ unsigned int rb3d_depthpitch;
+ unsigned int rb3d_zstencilcntl;
+
+ unsigned int pp_cntl; /* 0x1c38 */
+ unsigned int rb3d_cntl;
+ unsigned int rb3d_coloroffset;
+ unsigned int re_width_height;
+ unsigned int rb3d_colorpitch;
+ unsigned int se_cntl;
+
+ /* Vertex format state */
+ unsigned int se_coord_fmt; /* 0x1c50 */
+
+ /* Line state */
+ unsigned int re_line_pattern; /* 0x1cd0 */
+ unsigned int re_line_state;
+
+ unsigned int se_line_width; /* 0x1db8 */
+
+ /* Bumpmap state */
+ unsigned int pp_lum_matrix; /* 0x1d00 */
+
+ unsigned int pp_rot_matrix_0; /* 0x1d58 */
+ unsigned int pp_rot_matrix_1;
+
+ /* Mask state */
+ unsigned int rb3d_stencilrefmask; /* 0x1d7c */
+ unsigned int rb3d_ropcntl;
+ unsigned int rb3d_planemask;
+
+ /* Viewport state */
+ unsigned int se_vport_xscale; /* 0x1d98 */
+ unsigned int se_vport_xoffset;
+ unsigned int se_vport_yscale;
+ unsigned int se_vport_yoffset;
+ unsigned int se_vport_zscale;
+ unsigned int se_vport_zoffset;
+
+ /* Setup state */
+ unsigned int se_cntl_status; /* 0x2140 */
+
+ /* Misc state */
+ unsigned int re_top_left; /* 0x26c0 */
+ unsigned int re_misc;
+} drm_radeon_context_regs_t;
+
+typedef struct {
+ /* Zbias state */
+ unsigned int se_zbias_factor; /* 0x1dac */
+ unsigned int se_zbias_constant;
+} drm_radeon_context2_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int pp_txfilter;
+ unsigned int pp_txformat;
+ unsigned int pp_txoffset;
+ unsigned int pp_txcblend;
+ unsigned int pp_txablend;
+ unsigned int pp_tfactor;
+ unsigned int pp_border_color;
+} drm_radeon_texture_regs_t;
+
+typedef struct {
+ unsigned int start;
+ unsigned int finish;
+ unsigned int prim:8;
+ unsigned int stateidx:8;
+ unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
+ unsigned int vc_format; /* vertex format */
+} drm_radeon_prim_t;
+
+typedef struct {
+ drm_radeon_context_regs_t context;
+ drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
+ drm_radeon_context2_regs_t context2;
+ unsigned int dirty;
+} drm_radeon_state_t;
+
+typedef struct {
+ /* The channel for communication of state information to the
+ * kernel on firing a vertex buffer with either of the
+ * obsoleted vertex/index ioctls.
+ */
+ drm_radeon_context_regs_t context_state;
+ drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
+ unsigned int dirty;
+ unsigned int vertsize;
+ unsigned int vc_format;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int last_frame;
+ unsigned int last_dispatch;
+ unsigned int last_clear;
+
+ struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
+ 1];
+ unsigned int tex_age[RADEON_NR_TEX_HEAPS];
+ int ctx_owner;
+ int pfState; /* number of 3d windows (0, 1, 2 or more) */
+ int pfCurrentPage; /* which buffer is being displayed? */
+ int crtc2_base; /* CRTC2 frame offset */
+ int tiling_enabled; /* set by drm, read by 2d + 3d clients */
+} drm_radeon_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmRadeon.h)
+ *
+ * KW: actually it's illegal to change any of this (backwards compatibility).
+ */
+
+/* Radeon specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_RADEON_CP_INIT 0x00
+#define DRM_RADEON_CP_START 0x01
+#define DRM_RADEON_CP_STOP 0x02
+#define DRM_RADEON_CP_RESET 0x03
+#define DRM_RADEON_CP_IDLE 0x04
+#define DRM_RADEON_RESET 0x05
+#define DRM_RADEON_FULLSCREEN 0x06
+#define DRM_RADEON_SWAP 0x07
+#define DRM_RADEON_CLEAR 0x08
+#define DRM_RADEON_VERTEX 0x09
+#define DRM_RADEON_INDICES 0x0A
+#define DRM_RADEON_NOT_USED
+#define DRM_RADEON_STIPPLE 0x0C
+#define DRM_RADEON_INDIRECT 0x0D
+#define DRM_RADEON_TEXTURE 0x0E
+#define DRM_RADEON_VERTEX2 0x0F
+#define DRM_RADEON_CMDBUF 0x10
+#define DRM_RADEON_GETPARAM 0x11
+#define DRM_RADEON_FLIP 0x12
+#define DRM_RADEON_ALLOC 0x13
+#define DRM_RADEON_FREE 0x14
+#define DRM_RADEON_INIT_HEAP 0x15
+#define DRM_RADEON_IRQ_EMIT 0x16
+#define DRM_RADEON_IRQ_WAIT 0x17
+#define DRM_RADEON_CP_RESUME 0x18
+#define DRM_RADEON_SETPARAM 0x19
+#define DRM_RADEON_SURF_ALLOC 0x1a
+#define DRM_RADEON_SURF_FREE 0x1b
+/* KMS ioctl */
+#define DRM_RADEON_GEM_INFO 0x1c
+#define DRM_RADEON_GEM_CREATE 0x1d
+#define DRM_RADEON_GEM_MMAP 0x1e
+#define DRM_RADEON_GEM_PREAD 0x21
+#define DRM_RADEON_GEM_PWRITE 0x22
+#define DRM_RADEON_GEM_SET_DOMAIN 0x23
+#define DRM_RADEON_GEM_WAIT_IDLE 0x24
+#define DRM_RADEON_CS 0x26
+#define DRM_RADEON_INFO 0x27
+#define DRM_RADEON_GEM_SET_TILING 0x28
+#define DRM_RADEON_GEM_GET_TILING 0x29
+#define DRM_RADEON_GEM_BUSY 0x2a
+#define DRM_RADEON_GEM_VA 0x2b
+#define DRM_RADEON_GEM_OP 0x2c
+
+#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
+#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
+#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
+#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
+#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
+#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
+#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
+#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
+#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
+#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
+#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
+#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
+#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
+#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
+#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
+#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
+#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
+#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
+#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
+#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
+#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
+#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
+#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
+#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
+#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
+#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
+/* KMS */
+#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
+#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
+#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
+#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
+#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
+#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
+#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
+#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
+#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
+#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
+#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
+#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
+
+typedef struct drm_radeon_init {
+ enum {
+ RADEON_INIT_CP = 0x01,
+ RADEON_CLEANUP_CP = 0x02,
+ RADEON_INIT_R200_CP = 0x03,
+ RADEON_INIT_R300_CP = 0x04,
+ RADEON_INIT_R600_CP = 0x05
+ } func;
+ unsigned long sarea_priv_offset;
+ int is_pci;
+ int cp_mode;
+ int gart_size;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long gart_textures_offset;
+} drm_radeon_init_t;
+
+typedef struct drm_radeon_cp_stop {
+ int flush;
+ int idle;
+} drm_radeon_cp_stop_t;
+
+typedef struct drm_radeon_fullscreen {
+ enum {
+ RADEON_INIT_FULLSCREEN = 0x01,
+ RADEON_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_radeon_fullscreen_t;
+
+#define CLEAR_X1 0
+#define CLEAR_Y1 1
+#define CLEAR_X2 2
+#define CLEAR_Y2 3
+#define CLEAR_DEPTH 4
+
+typedef union drm_radeon_clear_rect {
+ float f[5];
+ unsigned int ui[5];
+} drm_radeon_clear_rect_t;
+
+typedef struct drm_radeon_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask; /* misnamed field: should be stencil */
+ drm_radeon_clear_rect_t *depth_boxes;
+} drm_radeon_clear_t;
+
+typedef struct drm_radeon_vertex {
+ int prim;
+ int idx; /* Index of vertex buffer */
+ int count; /* Number of vertices in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_radeon_vertex_t;
+
+typedef struct drm_radeon_indices {
+ int prim;
+ int idx;
+ int start;
+ int end;
+ int discard; /* Client finished with buffer? */
+} drm_radeon_indices_t;
+
+/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
+ * - allows multiple primitives and state changes in a single ioctl
+ * - supports driver change to emit native primitives
+ */
+typedef struct drm_radeon_vertex2 {
+ int idx; /* Index of vertex buffer */
+ int discard; /* Client finished with buffer? */
+ int nr_states;
+ drm_radeon_state_t *state;
+ int nr_prims;
+ drm_radeon_prim_t *prim;
+} drm_radeon_vertex2_t;
+
+/* v1.3 - obsoletes drm_radeon_vertex2
+ * - allows arbitrarily large cliprect list
+ * - allows updating of tcl packet, vector and scalar state
+ * - allows memory-efficient description of state updates
+ * - allows state to be emitted without a primitive
+ * (for clears, ctx switches)
+ * - allows more than one dma buffer to be referenced per ioctl
+ * - supports tcl driver
+ * - may be extended in future versions with new cmd types, packets
+ */
+typedef struct drm_radeon_cmd_buffer {
+ int bufsz;
+ char *buf;
+ int nbox;
+ struct drm_clip_rect *boxes;
+} drm_radeon_cmd_buffer_t;
+
+typedef struct drm_radeon_tex_image {
+ unsigned int x, y; /* Blit coordinates */
+ unsigned int width, height;
+ const void *data;
+} drm_radeon_tex_image_t;
+
+typedef struct drm_radeon_texture {
+ unsigned int offset;
+ int pitch;
+ int format;
+ int width; /* Texture image coordinates */
+ int height;
+ drm_radeon_tex_image_t *image;
+} drm_radeon_texture_t;
+
+typedef struct drm_radeon_stipple {
+ unsigned int *mask;
+} drm_radeon_stipple_t;
+
+typedef struct drm_radeon_indirect {
+ int idx;
+ int start;
+ int end;
+ int discard;
+} drm_radeon_indirect_t;
+
+/* enum for card type parameters */
+#define RADEON_CARD_PCI 0
+#define RADEON_CARD_AGP 1
+#define RADEON_CARD_PCIE 2
+
+/* 1.3: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
+#define RADEON_PARAM_LAST_FRAME 2
+#define RADEON_PARAM_LAST_DISPATCH 3
+#define RADEON_PARAM_LAST_CLEAR 4
+/* Added with DRM version 1.6. */
+#define RADEON_PARAM_IRQ_NR 5
+#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
+/* Added with DRM version 1.8. */
+#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
+#define RADEON_PARAM_STATUS_HANDLE 8
+#define RADEON_PARAM_SAREA_HANDLE 9
+#define RADEON_PARAM_GART_TEX_HANDLE 10
+#define RADEON_PARAM_SCRATCH_OFFSET 11
+#define RADEON_PARAM_CARD_TYPE 12
+#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
+#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
+#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
+#define RADEON_PARAM_DEVICE_ID 16
+#define RADEON_PARAM_NUM_Z_PIPES 17 /* num Z pipes */
+
+typedef struct drm_radeon_getparam {
+ int param;
+ void *value;
+} drm_radeon_getparam_t;
+
+/* 1.6: Set up a memory manager for regions of shared memory:
+ */
+#define RADEON_MEM_REGION_GART 1
+#define RADEON_MEM_REGION_FB 2
+
+typedef struct drm_radeon_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int *region_offset; /* offset from start of fb or GART */
+} drm_radeon_mem_alloc_t;
+
+typedef struct drm_radeon_mem_free {
+ int region;
+ int region_offset;
+} drm_radeon_mem_free_t;
+
+typedef struct drm_radeon_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_radeon_mem_init_heap_t;
+
+/* 1.6: Userspace can request & wait on irq's:
+ */
+typedef struct drm_radeon_irq_emit {
+ int *irq_seq;
+} drm_radeon_irq_emit_t;
+
+typedef struct drm_radeon_irq_wait {
+ int irq_seq;
+} drm_radeon_irq_wait_t;
+
+/* 1.10: Clients tell the DRM where they think the framebuffer is located in
+ * the card's address space, via a new generic ioctl to set parameters
+ */
+
+typedef struct drm_radeon_setparam {
+ unsigned int param;
+ __s64 value;
+} drm_radeon_setparam_t;
+
+#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
+#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
+#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
+#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
+#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
+#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
+/* 1.14: Clients can allocate/free a surface
+ */
+typedef struct drm_radeon_surface_alloc {
+ unsigned int address;
+ unsigned int size;
+ unsigned int flags;
+} drm_radeon_surface_alloc_t;
+
+typedef struct drm_radeon_surface_free {
+ unsigned int address;
+} drm_radeon_surface_free_t;
+
+#define DRM_RADEON_VBLANK_CRTC1 1
+#define DRM_RADEON_VBLANK_CRTC2 2
+
+/*
+ * Kernel modesetting world below.
+ */
+#define RADEON_GEM_DOMAIN_CPU 0x1
+#define RADEON_GEM_DOMAIN_GTT 0x2
+#define RADEON_GEM_DOMAIN_VRAM 0x4
+
+struct drm_radeon_gem_info {
+ uint64_t gart_size;
+ uint64_t vram_size;
+ uint64_t vram_visible;
+};
+
+#define RADEON_GEM_NO_BACKING_STORE 1
+
+struct drm_radeon_gem_create {
+ uint64_t size;
+ uint64_t alignment;
+ uint32_t handle;
+ uint32_t initial_domain;
+ uint32_t flags;
+};
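+
+/* Illustrative sketch, not part of the upstream header: allocating a GEM
+ * buffer object in VRAM with DRM_IOCTL_RADEON_GEM_CREATE. The helper name
+ * and the page alignment are assumptions; drmIoctl() is from <xf86drm.h>.
+ */
+static int radeon_bo_create(int fd, uint64_t size, uint32_t *handle)
+{
+ struct drm_radeon_gem_create args = {
+  .size = size,
+  .alignment = 4096,
+  .initial_domain = RADEON_GEM_DOMAIN_VRAM,
+ };
+ int ret = drmIoctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &args);
+
+ if (ret == 0)
+  *handle = args.handle; /* GEM handle filled in by the kernel */
+ return ret;
+}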
+
+#define RADEON_TILING_MACRO 0x1
+#define RADEON_TILING_MICRO 0x2
+#define RADEON_TILING_SWAP_16BIT 0x4
+#define RADEON_TILING_R600_NO_SCANOUT RADEON_TILING_SWAP_16BIT
+#define RADEON_TILING_SWAP_32BIT 0x8
+/* this object requires a surface when mapped - i.e. front buffer */
+#define RADEON_TILING_SURFACE 0x10
+#define RADEON_TILING_MICRO_SQUARE 0x20
+#define RADEON_TILING_EG_BANKW_SHIFT 8
+#define RADEON_TILING_EG_BANKW_MASK 0xf
+#define RADEON_TILING_EG_BANKH_SHIFT 12
+#define RADEON_TILING_EG_BANKH_MASK 0xf
+#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16
+#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf
+#define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24
+#define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf
+#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28
+#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
+
+struct drm_radeon_gem_set_tiling {
+ uint32_t handle;
+ uint32_t tiling_flags;
+ uint32_t pitch;
+};
+
+struct drm_radeon_gem_get_tiling {
+ uint32_t handle;
+ uint32_t tiling_flags;
+ uint32_t pitch;
+};
+
+struct drm_radeon_gem_mmap {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t addr_ptr;
+};
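+
+/* Illustrative sketch, not part of the upstream header: CPU-mapping a BO.
+ * The ioctl does not map anything itself; it returns in .addr_ptr a fake
+ * offset to pass to mmap() on the DRM fd, which is how libdrm's radeon
+ * backend uses it. Helper name assumed; needs <sys/mman.h>.
+ */
+static void *radeon_bo_map(int fd, uint32_t handle, uint64_t size)
+{
+ struct drm_radeon_gem_mmap args = { .handle = handle, .size = size };
+
+ if (drmIoctl(fd, DRM_IOCTL_RADEON_GEM_MMAP, &args))
+  return NULL;
+ return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+      fd, args.addr_ptr); /* MAP_FAILED on error */
+}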
+
+struct drm_radeon_gem_set_domain {
+ uint32_t handle;
+ uint32_t read_domains;
+ uint32_t write_domain;
+};
+
+struct drm_radeon_gem_wait_idle {
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_radeon_gem_busy {
+ uint32_t handle;
+ uint32_t domain;
+};
+
+struct drm_radeon_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ /* void *, but pointers are not 32/64 compatible */
+ uint64_t data_ptr;
+};
+
+struct drm_radeon_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ /* void *, but pointers are not 32/64 compatible */
+ uint64_t data_ptr;
+};
+
+/* Sets or returns a value associated with a buffer. */
+struct drm_radeon_gem_op {
+ uint32_t handle; /* buffer */
+ uint32_t op; /* RADEON_GEM_OP_* */
+ uint64_t value; /* input or return value */
+};
+
+#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
+#define RADEON_GEM_OP_SET_INITIAL_DOMAIN 1
+
+#define RADEON_VA_MAP 1
+#define RADEON_VA_UNMAP 2
+
+#define RADEON_VA_RESULT_OK 0
+#define RADEON_VA_RESULT_ERROR 1
+#define RADEON_VA_RESULT_VA_EXIST 2
+
+#define RADEON_VM_PAGE_VALID (1 << 0)
+#define RADEON_VM_PAGE_READABLE (1 << 1)
+#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
+#define RADEON_VM_PAGE_SYSTEM (1 << 3)
+#define RADEON_VM_PAGE_SNOOPED (1 << 4)
+
+struct drm_radeon_gem_va {
+ uint32_t handle;
+ uint32_t operation;
+ uint32_t vm_id;
+ uint32_t flags;
+ uint64_t offset;
+};
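+
+/* Illustrative sketch, not part of the upstream header: mapping a BO at a
+ * chosen GPU virtual address. Flags and helper name are assumptions; the
+ * address should lie above the RADEON_INFO_VA_START limit, and the kernel
+ * reuses .operation to report a RADEON_VA_RESULT_* code on return.
+ */
+static int radeon_bo_va_map(int fd, uint32_t handle, uint64_t gpu_va)
+{
+ struct drm_radeon_gem_va args = {
+  .handle = handle,
+  .operation = RADEON_VA_MAP,
+  .flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
+    RADEON_VM_PAGE_WRITEABLE,
+  .offset = gpu_va,
+ };
+ int ret = drmIoctl(fd, DRM_IOCTL_RADEON_GEM_VA, &args);
+
+ if (ret == 0 && args.operation != RADEON_VA_RESULT_OK)
+  return -1; /* e.g. RADEON_VA_RESULT_VA_EXIST */
+ return ret;
+}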
+
+#define RADEON_CHUNK_ID_RELOCS 0x01
+#define RADEON_CHUNK_ID_IB 0x02
+#define RADEON_CHUNK_ID_FLAGS 0x03
+#define RADEON_CHUNK_ID_CONST_IB 0x04
+
+/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
+#define RADEON_CS_KEEP_TILING_FLAGS 0x01
+#define RADEON_CS_USE_VM 0x02
+#define RADEON_CS_END_OF_FRAME 0x04 /* a hint from userspace which CS is the last one */
+/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
+#define RADEON_CS_RING_GFX 0
+#define RADEON_CS_RING_COMPUTE 1
+#define RADEON_CS_RING_DMA 2
+#define RADEON_CS_RING_UVD 3
+#define RADEON_CS_RING_VCE 4
+/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
+/* 0 = normal, + = higher priority, - = lower priority */
+
+struct drm_radeon_cs_chunk {
+ uint32_t chunk_id;
+ uint32_t length_dw;
+ uint64_t chunk_data;
+};
+
+/* drm_radeon_cs_reloc.flags */
+
+struct drm_radeon_cs_reloc {
+ uint32_t handle;
+ uint32_t read_domains;
+ uint32_t write_domain;
+ uint32_t flags;
+};
+
+struct drm_radeon_cs {
+ uint32_t num_chunks;
+ uint32_t cs_id;
+ /* this points to an array of uint64_t, each of which points to a cs chunk */
+ uint64_t chunks;
+ /* updates to the limits after this CS ioctl */
+ uint64_t gart_limit;
+ uint64_t vram_limit;
+};
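+
+/* Illustrative sketch, not part of the upstream header: submitting a single
+ * IB through DRM_IOCTL_RADEON_CS. Note the double indirection: .chunks is a
+ * pointer to an array of uint64_t, each of which is itself a pointer to one
+ * chunk struct. Helper name and error handling assumed; casts via uintptr_t
+ * need <stdint.h>.
+ */
+static int radeon_cs_submit(int fd, uint32_t *ib, uint32_t ndw,
+       struct drm_radeon_cs_reloc *relocs, uint32_t nrelocs)
+{
+ struct drm_radeon_cs_chunk ib_chunk = {
+  .chunk_id = RADEON_CHUNK_ID_IB,
+  .length_dw = ndw,
+  .chunk_data = (uintptr_t)ib,
+ };
+ struct drm_radeon_cs_chunk reloc_chunk = {
+  .chunk_id = RADEON_CHUNK_ID_RELOCS,
+  .length_dw = nrelocs * (sizeof(*relocs) / 4), /* dwords, not bytes */
+  .chunk_data = (uintptr_t)relocs,
+ };
+ uint64_t chunk_ptrs[2] = { (uintptr_t)&ib_chunk, (uintptr_t)&reloc_chunk };
+ struct drm_radeon_cs cs = {
+  .num_chunks = 2,
+  .chunks = (uintptr_t)chunk_ptrs,
+ };
+ return drmIoctl(fd, DRM_IOCTL_RADEON_CS, &cs);
+}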
+
+#define RADEON_INFO_DEVICE_ID 0x00
+#define RADEON_INFO_NUM_GB_PIPES 0x01
+#define RADEON_INFO_NUM_Z_PIPES 0x02
+#define RADEON_INFO_ACCEL_WORKING 0x03
+#define RADEON_INFO_CRTC_FROM_ID 0x04
+#define RADEON_INFO_ACCEL_WORKING2 0x05
+#define RADEON_INFO_TILING_CONFIG 0x06
+#define RADEON_INFO_WANT_HYPERZ 0x07
+#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
+#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
+#define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
+#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
+#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
+#define RADEON_INFO_BACKEND_MAP 0x0d /* pipe to backend map, needed by mesa */
+/* virtual address start, va < start are reserved by the kernel */
+#define RADEON_INFO_VA_START 0x0e
+/* maximum size of ib using the virtual memory cs */
+#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
+/* max pipes - needed for compute shaders */
+#define RADEON_INFO_MAX_PIPES 0x10
+/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
+#define RADEON_INFO_TIMESTAMP 0x11
+/* max shader engines (SE) - needed for geometry shaders, etc. */
+#define RADEON_INFO_MAX_SE 0x12
+/* max SH per SE */
+#define RADEON_INFO_MAX_SH_PER_SE 0x13
+/* fast fb access is enabled */
+#define RADEON_INFO_FASTFB_WORKING 0x14
+/* query if a RADEON_CS_RING_* submission is supported */
+#define RADEON_INFO_RING_WORKING 0x15
+/* SI tile mode array */
+#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
+/* query if CP DMA is supported on the compute ring */
+#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
+/* CIK macrotile mode array */
+#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
+/* query the number of render backends */
+#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
+/* max engine clock - needed for OpenCL */
+#define RADEON_INFO_MAX_SCLK 0x1a
+/* version of VCE firmware */
+#define RADEON_INFO_VCE_FW_VERSION 0x1b
+/* version of VCE feedback */
+#define RADEON_INFO_VCE_FB_VERSION 0x1c
+#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
+#define RADEON_INFO_VRAM_USAGE 0x1e
+#define RADEON_INFO_GTT_USAGE 0x1f
+
+
+struct drm_radeon_info {
+ uint32_t request;
+ uint32_t pad;
+ uint64_t value;
+};
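+
+/* Illustrative sketch, not part of the upstream header: the KMS-era query
+ * path. Unlike GETPARAM above, .value carries a user pointer cast to a
+ * 64-bit integer, through which the kernel writes the requested datum.
+ * Helper name is hypothetical.
+ */
+static int radeon_info(int fd, uint32_t request, uint32_t *out)
+{
+ struct drm_radeon_info info = {
+  .request = request, /* one of the RADEON_INFO_* codes */
+  .value = (uintptr_t)out,
+ };
+ return drmIoctl(fd, DRM_IOCTL_RADEON_INFO, &info);
+}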
+
+/* These correspond to the tile index to use; this makes explicit the API
+ * that is otherwise only implicitly defined by the tile mode array.
+ */
+#define SI_TILE_MODE_COLOR_LINEAR_ALIGNED 8
+#define SI_TILE_MODE_COLOR_1D 13
+#define SI_TILE_MODE_COLOR_1D_SCANOUT 9
+#define SI_TILE_MODE_COLOR_2D_8BPP 14
+#define SI_TILE_MODE_COLOR_2D_16BPP 15
+#define SI_TILE_MODE_COLOR_2D_32BPP 16
+#define SI_TILE_MODE_COLOR_2D_64BPP 17
+#define SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP 11
+#define SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP 12
+#define SI_TILE_MODE_DEPTH_STENCIL_1D 4
+#define SI_TILE_MODE_DEPTH_STENCIL_2D 0
+#define SI_TILE_MODE_DEPTH_STENCIL_2D_2AA 3
+#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
+#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
+
+#define CIK_TILE_MODE_COLOR_2D 14
+#define CIK_TILE_MODE_COLOR_2D_SCANOUT 10
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64 0
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128 1
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256 2
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_512 3
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_ROW_SIZE 4
+#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
+
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/savage_drm.h b/chromium/third_party/libdrm/src/include/drm/savage_drm.h
new file mode 100644
index 00000000000..f7a75eff007
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/savage_drm.h
@@ -0,0 +1,210 @@
+/* savage_drm.h -- Public header for the savage driver
+ *
+ * Copyright 2004 Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SAVAGE_DRM_H__
+#define __SAVAGE_DRM_H__
+
+#ifndef __SAVAGE_SAREA_DEFINES__
+#define __SAVAGE_SAREA_DEFINES__
+
+/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
+ * regions, subject to a minimum region size of (1<<16) == 64k.
+ *
+ * Clients may subdivide regions internally, but when sharing between
+ * clients, the region size is the minimum granularity.
+ */
+
+#define SAVAGE_CARD_HEAP 0
+#define SAVAGE_AGP_HEAP 1
+#define SAVAGE_NR_TEX_HEAPS 2
+#define SAVAGE_NR_TEX_REGIONS 16
+#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
+
+#endif /* __SAVAGE_SAREA_DEFINES__ */
+
+typedef struct _drm_savage_sarea {
+ /* LRU lists for texture memory in agp space and on the card.
+ */
+ struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
+ 1];
+ unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
+
+ /* Mechanism to validate card state.
+ */
+ int ctxOwner;
+} drm_savage_sarea_t, *drm_savage_sarea_ptr;
+
+/* Savage-specific ioctls
+ */
+#define DRM_SAVAGE_BCI_INIT 0x00
+#define DRM_SAVAGE_BCI_CMDBUF 0x01
+#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
+#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
+
+#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
+#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
+#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
+#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
+
+#define SAVAGE_DMA_PCI 1
+#define SAVAGE_DMA_AGP 3
+typedef struct drm_savage_init {
+ enum {
+ SAVAGE_INIT_BCI = 1,
+ SAVAGE_CLEANUP_BCI = 2
+ } func;
+ unsigned int sarea_priv_offset;
+
+ /* some parameters */
+ unsigned int cob_size;
+ unsigned int bci_threshold_lo, bci_threshold_hi;
+ unsigned int dma_type;
+
+ /* frame buffer layout */
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ /* local textures */
+ unsigned int texture_offset;
+ unsigned int texture_size;
+
+ /* physical locations of non-permanent maps */
+ unsigned long status_offset;
+ unsigned long buffers_offset;
+ unsigned long agp_textures_offset;
+ unsigned long cmd_dma_offset;
+} drm_savage_init_t;
+
+typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
+typedef struct drm_savage_cmdbuf {
+ /* command buffer in client's address space */
+ drm_savage_cmd_header_t *cmd_addr;
+ unsigned int size; /* size of the command buffer in 64bit units */
+
+ unsigned int dma_idx; /* DMA buffer index to use */
+ int discard; /* discard DMA buffer when done */
+ /* vertex buffer in client's address space */
+ unsigned int *vb_addr;
+ unsigned int vb_size; /* size of client vertex buffer in bytes */
+ unsigned int vb_stride; /* stride of vertices in 32bit words */
+ /* boxes in client's address space */
+ struct drm_clip_rect *box_addr;
+ unsigned int nbox; /* number of clipping boxes */
+} drm_savage_cmdbuf_t;
+
+#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
+#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
+#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
+typedef struct drm_savage_event {
+ unsigned int count;
+ unsigned int flags;
+} drm_savage_event_emit_t, drm_savage_event_wait_t;
+
+/* Commands for the cmdbuf ioctl
+ */
+#define SAVAGE_CMD_STATE 0 /* a range of state registers */
+#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
+#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
+#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
+#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */
+#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
+#define SAVAGE_CMD_SWAP 6 /* swap buffers */
+
+/* Primitive types
+ */
+#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
+#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
+#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
+#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
+ * shading on s3d */
+
+/* Skip flags (vertex format)
+ */
+#define SAVAGE_SKIP_Z 0x01
+#define SAVAGE_SKIP_W 0x02
+#define SAVAGE_SKIP_C0 0x04
+#define SAVAGE_SKIP_C1 0x08
+#define SAVAGE_SKIP_S0 0x10
+#define SAVAGE_SKIP_T0 0x20
+#define SAVAGE_SKIP_ST0 0x30
+#define SAVAGE_SKIP_S1 0x40
+#define SAVAGE_SKIP_T1 0x80
+#define SAVAGE_SKIP_ST1 0xc0
+#define SAVAGE_SKIP_ALL_S3D 0x3f
+#define SAVAGE_SKIP_ALL_S4 0xff
+
+/* Buffer names for clear command
+ */
+#define SAVAGE_FRONT 0x1
+#define SAVAGE_BACK 0x2
+#define SAVAGE_DEPTH 0x4
+
+/* 64-bit command header
+ */
+union drm_savage_cmd_header {
+ struct {
+ unsigned char cmd; /* command */
+ unsigned char pad0;
+ unsigned short pad1;
+ unsigned short pad2;
+ unsigned short pad3;
+ } cmd; /* generic */
+ struct {
+ unsigned char cmd;
+ unsigned char global; /* need idle engine? */
+ unsigned short count; /* number of consecutive registers */
+ unsigned short start; /* first register */
+ unsigned short pad3;
+ } state; /* SAVAGE_CMD_STATE */
+ struct {
+ unsigned char cmd;
+ unsigned char prim; /* primitive type */
+ unsigned short skip; /* vertex format (skip flags) */
+ unsigned short count; /* number of vertices */
+ unsigned short start; /* first vertex in DMA/vertex buffer */
+ } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
+ struct {
+ unsigned char cmd;
+ unsigned char prim;
+ unsigned short skip;
+ unsigned short count; /* number of indices that follow */
+ unsigned short pad3;
+ } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
+ struct {
+ unsigned char cmd;
+ unsigned char pad0;
+ unsigned short pad1;
+ unsigned int flags;
+ } clear0; /* SAVAGE_CMD_CLEAR */
+ struct {
+ unsigned int mask;
+ unsigned int value;
+ } clear1; /* SAVAGE_CMD_CLEAR data */
+};
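+
+/* Illustrative sketch, not part of the upstream header: packing one 64-bit
+ * header for a SAVAGE_CMD_STATE update of consecutive registers, as consumed
+ * from the cmd_addr array passed to DRM_IOCTL_SAVAGE_CMDBUF. Helper name is
+ * hypothetical.
+ */
+static drm_savage_cmd_header_t savage_state_header(unsigned short start_reg,
+       unsigned short count)
+{
+ drm_savage_cmd_header_t h = {0};
+
+ h.state.cmd = SAVAGE_CMD_STATE;
+ h.state.global = 0; /* no engine idle required first */
+ h.state.count = count; /* number of consecutive registers */
+ h.state.start = start_reg; /* first register */
+ return h;
+}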
+
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/sis_drm.h b/chromium/third_party/libdrm/src/include/drm/sis_drm.h
new file mode 100644
index 00000000000..30f7b382746
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/sis_drm.h
@@ -0,0 +1,67 @@
+/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
+/*
+ * Copyright 2005 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __SIS_DRM_H__
+#define __SIS_DRM_H__
+
+/* SiS specific ioctls */
+#define NOT_USED_0_3
+#define DRM_SIS_FB_ALLOC 0x04
+#define DRM_SIS_FB_FREE 0x05
+#define NOT_USED_6_12
+#define DRM_SIS_AGP_INIT 0x13
+#define DRM_SIS_AGP_ALLOC 0x14
+#define DRM_SIS_AGP_FREE 0x15
+#define DRM_SIS_FB_INIT 0x16
+
+#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
+#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
+/*
+#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
+#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
+#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
+*/
+
+typedef struct {
+ int context;
+ unsigned int offset;
+ unsigned int size;
+ unsigned long free;
+} drm_sis_mem_t;
+
+typedef struct {
+ unsigned int offset, size;
+} drm_sis_agp_t;
+
+typedef struct {
+ unsigned int offset, size;
+} drm_sis_fb_t;
+
+#endif /* __SIS_DRM_H__ */
diff --git a/chromium/third_party/libdrm/src/include/drm/tegra_drm.h b/chromium/third_party/libdrm/src/include/drm/tegra_drm.h
new file mode 100644
index 00000000000..7c0fe0ed511
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/tegra_drm.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _UAPI_TEGRA_DRM_H_
+#define _UAPI_TEGRA_DRM_H_
+
+#include <drm.h>
+
+#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
+#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
+
+struct drm_tegra_gem_create {
+ __u64 size;
+ __u32 flags;
+ __u32 handle;
+};
+
+struct drm_tegra_gem_mmap {
+ __u32 handle;
+ __u32 pad;
+ __u64 offset;
+};
+
+struct drm_tegra_syncpt_read {
+ __u32 id;
+ __u32 value;
+};
+
+struct drm_tegra_syncpt_incr {
+ __u32 id;
+ __u32 pad;
+};
+
+struct drm_tegra_syncpt_wait {
+ __u32 id;
+ __u32 thresh;
+ __u32 timeout;
+ __u32 value;
+};
+
+#define DRM_TEGRA_NO_TIMEOUT (0xffffffff)
+
+struct drm_tegra_open_channel {
+ __u32 client;
+ __u32 pad;
+ __u64 context;
+};
+
+struct drm_tegra_close_channel {
+ __u64 context;
+};
+
+struct drm_tegra_get_syncpt {
+ __u64 context;
+ __u32 index;
+ __u32 id;
+};
+
+struct drm_tegra_get_syncpt_base {
+ __u64 context;
+ __u32 syncpt;
+ __u32 id;
+};
+
+struct drm_tegra_syncpt {
+ __u32 id;
+ __u32 incrs;
+};
+
+struct drm_tegra_cmdbuf {
+ __u32 handle;
+ __u32 offset;
+ __u32 words;
+ __u32 pad;
+};
+
+struct drm_tegra_reloc {
+ struct {
+ __u32 handle;
+ __u32 offset;
+ } cmdbuf;
+ struct {
+ __u32 handle;
+ __u32 offset;
+ } target;
+ __u32 shift;
+ __u32 pad;
+};
+
+struct drm_tegra_waitchk {
+ __u32 handle;
+ __u32 offset;
+ __u32 syncpt;
+ __u32 thresh;
+};
+
+struct drm_tegra_submit {
+ __u64 context;
+ __u32 num_syncpts;
+ __u32 num_cmdbufs;
+ __u32 num_relocs;
+ __u32 num_waitchks;
+ __u32 waitchk_mask;
+ __u32 timeout;
+ __u64 syncpts;
+ __u64 cmdbufs;
+ __u64 relocs;
+ __u64 waitchks;
+ __u32 fence; /* Return value */
+
+ __u32 reserved[5]; /* future expansion */
+};
+
+#define DRM_TEGRA_GEM_TILING_MODE_PITCH 0
+#define DRM_TEGRA_GEM_TILING_MODE_TILED 1
+#define DRM_TEGRA_GEM_TILING_MODE_BLOCK 2
+
+struct drm_tegra_gem_set_tiling {
+ /* input */
+ __u32 handle;
+ __u32 mode;
+ __u32 value;
+ __u32 pad;
+};
+
+struct drm_tegra_gem_get_tiling {
+ /* input */
+ __u32 handle;
+ /* output */
+ __u32 mode;
+ __u32 value;
+ __u32 pad;
+};
+
+#define DRM_TEGRA_GEM_BOTTOM_UP (1 << 0)
+#define DRM_TEGRA_GEM_FLAGS (DRM_TEGRA_GEM_BOTTOM_UP)
+
+struct drm_tegra_gem_set_flags {
+ /* input */
+ __u32 handle;
+ /* output */
+ __u32 flags;
+};
+
+struct drm_tegra_gem_get_flags {
+ /* input */
+ __u32 handle;
+ /* output */
+ __u32 flags;
+};
+
+#define DRM_TEGRA_GEM_CREATE 0x00
+#define DRM_TEGRA_GEM_MMAP 0x01
+#define DRM_TEGRA_SYNCPT_READ 0x02
+#define DRM_TEGRA_SYNCPT_INCR 0x03
+#define DRM_TEGRA_SYNCPT_WAIT 0x04
+#define DRM_TEGRA_OPEN_CHANNEL 0x05
+#define DRM_TEGRA_CLOSE_CHANNEL 0x06
+#define DRM_TEGRA_GET_SYNCPT 0x07
+#define DRM_TEGRA_SUBMIT 0x08
+#define DRM_TEGRA_GET_SYNCPT_BASE 0x09
+#define DRM_TEGRA_GEM_SET_TILING 0x0a
+#define DRM_TEGRA_GEM_GET_TILING 0x0b
+#define DRM_TEGRA_GEM_SET_FLAGS 0x0c
+#define DRM_TEGRA_GEM_GET_FLAGS 0x0d
+
+#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
+#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
+#define DRM_IOCTL_TEGRA_SYNCPT_READ DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_READ, struct drm_tegra_syncpt_read)
+#define DRM_IOCTL_TEGRA_SYNCPT_INCR DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_INCR, struct drm_tegra_syncpt_incr)
+#define DRM_IOCTL_TEGRA_SYNCPT_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_WAIT, struct drm_tegra_syncpt_wait)
+#define DRM_IOCTL_TEGRA_OPEN_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_OPEN_CHANNEL, struct drm_tegra_open_channel)
+#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
+#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
+#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
+#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
+#define DRM_IOCTL_TEGRA_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_TILING, struct drm_tegra_gem_set_tiling)
+#define DRM_IOCTL_TEGRA_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_TILING, struct drm_tegra_gem_get_tiling)
+#define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
+#define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
+
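+/* Illustrative sketch, not part of the upstream header: blocking on a host1x
+ * syncpoint until it reaches a threshold, the primitive the submit/fence
+ * machinery above builds on. Helper name and the timeout unit (taken here
+ * to be milliseconds) are assumptions.
+ */
+static int tegra_syncpt_wait(int fd, __u32 id, __u32 thresh, __u32 timeout)
+{
+ struct drm_tegra_syncpt_wait args = {
+  .id = id,
+  .thresh = thresh,
+  .timeout = timeout, /* or DRM_TEGRA_NO_TIMEOUT to block forever */
+ };
+ return drmIoctl(fd, DRM_IOCTL_TEGRA_SYNCPT_WAIT, &args);
+}
+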
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/vc4_drm.h b/chromium/third_party/libdrm/src/include/drm/vc4_drm.h
new file mode 100644
index 00000000000..919eecea762
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/vc4_drm.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright © 2014-2015 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _VC4_DRM_H_
+#define _VC4_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_VC4_SUBMIT_CL 0x00
+#define DRM_VC4_WAIT_SEQNO 0x01
+#define DRM_VC4_WAIT_BO 0x02
+#define DRM_VC4_CREATE_BO 0x03
+#define DRM_VC4_MMAP_BO 0x04
+#define DRM_VC4_CREATE_SHADER_BO 0x05
+#define DRM_VC4_GET_HANG_STATE 0x06
+#define DRM_VC4_GET_PARAM 0x07
+
+#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
+#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
+#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
+#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
+#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
+#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
+#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
+#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
+
+struct drm_vc4_submit_rcl_surface {
+ __u32 hindex; /* Handle index, or ~0 if not present. */
+ __u32 offset; /* Offset to start of buffer. */
+ /*
+ * Bits for either render config (color_write) or load/store packet.
+ * Bits should all be 0 for MSAA load/stores.
+ */
+ __u16 bits;
+
+#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0)
+ __u16 flags;
+};
+
+/**
+ * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
+ * engine.
+ *
+ * Drivers typically use GPU BOs to store batchbuffers / command lists and
+ * their associated state. However, because the VC4 lacks an MMU, we have to
+ * do validation of memory accesses by the GPU commands. If we were to store
+ * our commands in BOs, we'd need to do uncached readback from them to do the
+ * validation process, which is too expensive. Instead, userspace accumulates
+ * commands and associated state in plain memory, then the kernel copies the
+ * data to its own address space, and then validates and stores it in a GPU
+ * BO.
+ */
+struct drm_vc4_submit_cl {
+ /* Pointer to the binner command list.
+ *
+ * This is the first set of commands executed, which runs the
+ * coordinate shader to determine where primitives land on the screen,
+ * then writes out the state updates and draw calls necessary per tile
+ * to the tile allocation BO.
+ */
+ __u64 bin_cl;
+
+ /* Pointer to the shader records.
+ *
+ * Shader records are the structures read by the hardware that contain
+ * pointers to uniforms, shaders, and vertex attributes. The
+ * reference to the shader record has enough information to determine
+ * how many pointers are necessary (fixed number for shaders/uniforms,
+ * and an attribute count), so those BO indices into bo_handles are
+ * just stored as __u32s before each shader record passed in.
+ */
+ __u64 shader_rec;
+
+ /* Pointer to uniform data and texture handles for the textures
+ * referenced by the shader.
+ *
+ * For each shader state record, there is a set of uniform data in the
+ * order referenced by the record (FS, VS, then CS). Each set of
+ * uniform data has a __u32 index into bo_handles per texture
+ * sample operation, in the order the QPU_W_TMUn_S writes appear in
+ * the program. Following the texture BO handle indices is the actual
+ * uniform data.
+ *
+ * The individual uniform state blocks don't have sizes passed in,
+ * because the kernel has to determine the sizes anyway during shader
+ * code validation.
+ */
+ __u64 uniforms;
+ __u64 bo_handles;
+
+ /* Size in bytes of the binner command list. */
+ __u32 bin_cl_size;
+ /* Size in bytes of the set of shader records. */
+ __u32 shader_rec_size;
+ /* Number of shader records.
+ *
+ * This could just be computed from the contents of shader_records and
+ * the address bits of references to them from the bin CL, but it
+ * keeps the kernel from having to resize some allocations it makes.
+ */
+ __u32 shader_rec_count;
+ /* Size in bytes of the uniform state. */
+ __u32 uniforms_size;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ /* RCL setup: */
+ __u16 width;
+ __u16 height;
+ __u8 min_x_tile;
+ __u8 min_y_tile;
+ __u8 max_x_tile;
+ __u8 max_y_tile;
+ struct drm_vc4_submit_rcl_surface color_read;
+ struct drm_vc4_submit_rcl_surface color_write;
+ struct drm_vc4_submit_rcl_surface zs_read;
+ struct drm_vc4_submit_rcl_surface zs_write;
+ struct drm_vc4_submit_rcl_surface msaa_color_write;
+ struct drm_vc4_submit_rcl_surface msaa_zs_write;
+ __u32 clear_color[2];
+ __u32 clear_z;
+ __u8 clear_s;
+
+ __u32 pad:24;
+
+#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
+ __u32 flags;
+
+ /* Returned value of the seqno of this render job (for the
+ * wait ioctl).
+ */
+ __u64 seqno;
+};
+
+/**
+ * struct drm_vc4_wait_seqno - ioctl argument for waiting for
+ * DRM_VC4_SUBMIT_CL completion using its returned seqno.
+ *
+ * timeout_ns is the timeout in nanoseconds, where "0" means "don't
+ * block, just return the status."
+ */
+struct drm_vc4_wait_seqno {
+ __u64 seqno;
+ __u64 timeout_ns;
+};
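+
+/* Illustrative sketch, not part of the upstream header: the submit/wait
+ * pairing described above, feeding the seqno returned by SUBMIT_CL straight
+ * into WAIT_SEQNO. Helper name and the one-second timeout are assumptions.
+ */
+static int vc4_wait_job(int fd, __u64 seqno)
+{
+ struct drm_vc4_wait_seqno wait = {
+  .seqno = seqno, /* from drm_vc4_submit_cl.seqno */
+  .timeout_ns = 1000000000ull, /* 1 s; 0 would just poll */
+ };
+ return drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
+}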
+
+/**
+ * struct drm_vc4_wait_bo - ioctl argument for waiting for
+ * completion of the last DRM_VC4_SUBMIT_CL on a BO.
+ *
+ * This is useful for cases where multiple processes might be
+ * rendering to a BO and you want to wait for all rendering to be
+ * completed.
+ */
+struct drm_vc4_wait_bo {
+ __u32 handle;
+ __u32 pad;
+ __u64 timeout_ns;
+};
+
+/**
+ * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_vc4_create_bo {
+ __u32 size;
+ __u32 flags;
+ /** Returned GEM handle for the BO. */
+ __u32 handle;
+ __u32 pad;
+};
+
+/**
+ * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
+ *
+ * This doesn't actually perform an mmap. Instead, it returns the
+ * offset you need to use in an mmap on the DRM device node. This
+ * means that tools like valgrind end up knowing about the mapped
+ * memory.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_vc4_mmap_bo {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 flags;
+ /** offset into the drm node to use for subsequent mmap call. */
+ __u64 offset;
+};
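+
+/* Illustrative sketch, not part of the upstream header: the two-step flow
+ * the comment above describes - ask the kernel for a fake offset, then
+ * mmap() the DRM fd at that offset. Helper name assumed; needs <sys/mman.h>.
+ */
+static void *vc4_bo_map(int fd, __u32 handle, size_t size)
+{
+ struct drm_vc4_mmap_bo args = { .handle = handle };
+
+ if (drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &args))
+  return NULL;
+ return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+      fd, args.offset); /* MAP_FAILED on error */
+}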
+
+/**
+ * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
+ * shader BOs.
+ *
+ * Since allowing a shader to be overwritten while it's also being
+ * executed from would allow privilege escalation, shaders must be
+ * created using this ioctl, and they can't be mmapped later.
+ */
+struct drm_vc4_create_shader_bo {
+ /* Size of the data argument. */
+ __u32 size;
+ /* Flags, currently must be 0. */
+ __u32 flags;
+
+ /* Pointer to the data. */
+ __u64 data;
+
+ /** Returned GEM handle for the BO. */
+ __u32 handle;
+ /* Pad, must be 0. */
+ __u32 pad;
+};
+
+struct drm_vc4_get_hang_state_bo {
+ __u32 handle;
+ __u32 paddr;
+ __u32 size;
+ __u32 pad;
+};
+
+/**
+ * struct drm_vc4_get_hang_state - ioctl argument for collecting state
+ * from a GPU hang for analysis.
+ */
+struct drm_vc4_get_hang_state {
+ /** Pointer to array of struct drm_vc4_get_hang_state_bo. */
+ __u64 bo;
+ /**
+ * On input, the size of the bo array. Output is the number
+ * of bos to be returned.
+ */
+ __u32 bo_count;
+
+ __u32 start_bin, start_render;
+
+ __u32 ct0ca, ct0ea;
+ __u32 ct1ca, ct1ea;
+ __u32 ct0cs, ct1cs;
+ __u32 ct0ra0, ct1ra0;
+
+ __u32 bpca, bpcs;
+ __u32 bpoa, bpos;
+
+ __u32 vpmbase;
+
+ __u32 dbge;
+ __u32 fdbgo;
+ __u32 fdbgb;
+ __u32 fdbgr;
+ __u32 fdbgs;
+ __u32 errstat;
+
+ /* Pad that we may save more registers into in the future. */
+ __u32 pad[16];
+};
+
+#define DRM_VC4_PARAM_V3D_IDENT0 0
+#define DRM_VC4_PARAM_V3D_IDENT1 1
+#define DRM_VC4_PARAM_V3D_IDENT2 2
+#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
+
+struct drm_vc4_get_param {
+ __u32 param;
+ __u32 pad;
+ __u64 value;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _VC4_DRM_H_ */
diff --git a/chromium/third_party/libdrm/src/include/drm/vgem_drm.h b/chromium/third_party/libdrm/src/include/drm/vgem_drm.h
new file mode 100644
index 00000000000..9be928f4a52
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/vgem_drm.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 The Chromium OS Authors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _VGEM_DRM_H_
+#define _VGEM_DRM_H_
+
+#include "drm.h"
+
+#define DRM_VGEM_MODE_MAP_DUMB 0x00
+
+#define DRM_IOCTL_VGEM_MODE_MAP_DUMB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VGEM_MODE_MAP_DUMB, struct drm_mode_map_dumb)
+
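+/* Illustrative sketch, not part of the upstream header: vgem adds only the
+ * MAP_DUMB ioctl; the BO itself comes from the generic dumb-buffer ioctls in
+ * the core DRM headers. Helper name assumed; needs <sys/mman.h>.
+ */
+static void *vgem_create_and_map(int fd, __u32 width, __u32 height, __u32 bpp)
+{
+ struct drm_mode_create_dumb create = {
+  .width = width, .height = height, .bpp = bpp,
+ };
+ struct drm_mode_map_dumb map = {0};
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
+  return NULL;
+ map.handle = create.handle;
+ if (drmIoctl(fd, DRM_IOCTL_VGEM_MODE_MAP_DUMB, &map))
+  return NULL;
+ return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+      fd, map.offset);
+}
+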
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/via_drm.h b/chromium/third_party/libdrm/src/include/drm/via_drm.h
new file mode 100644
index 00000000000..182f8792fb4
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/via_drm.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _VIA_DRM_H_
+#define _VIA_DRM_H_
+
+#include "drm.h"
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ */
+
+#ifndef _VIA_DEFINES_
+#define _VIA_DEFINES_
+
+#include "via_drmclient.h"
+
+#define VIA_NR_SAREA_CLIPRECTS 8
+#define VIA_NR_XVMC_PORTS 10
+#define VIA_NR_XVMC_LOCKS 5
+#define VIA_MAX_CACHELINE_SIZE 64
+#define XVMCLOCKPTR(saPriv,lockNo) \
+ ((__volatile__ struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
+ (VIA_MAX_CACHELINE_SIZE - 1)) & \
+ ~(VIA_MAX_CACHELINE_SIZE - 1)) + \
+ VIA_MAX_CACHELINE_SIZE*(lockNo)))
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
+ */
+#define VIA_NR_TEX_REGIONS 64
+#define VIA_LOG_MIN_TEX_REGION_SIZE 16
+#endif
+
+#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
+#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
+#define VIA_UPLOAD_CTX 0x4
+#define VIA_UPLOAD_BUFFERS 0x8
+#define VIA_UPLOAD_TEX0 0x10
+#define VIA_UPLOAD_TEX1 0x20
+#define VIA_UPLOAD_CLIPRECTS 0x40
+#define VIA_UPLOAD_ALL 0xff
+
+/* VIA specific ioctls */
+#define DRM_VIA_ALLOCMEM 0x00
+#define DRM_VIA_FREEMEM 0x01
+#define DRM_VIA_AGP_INIT 0x02
+#define DRM_VIA_FB_INIT 0x03
+#define DRM_VIA_MAP_INIT 0x04
+#define DRM_VIA_DEC_FUTEX 0x05
+#define NOT_USED
+#define DRM_VIA_DMA_INIT 0x07
+#define DRM_VIA_CMDBUFFER 0x08
+#define DRM_VIA_FLUSH 0x09
+#define DRM_VIA_PCICMD 0x0a
+#define DRM_VIA_CMDBUF_SIZE 0x0b
+#define NOT_USED
+#define DRM_VIA_WAIT_IRQ 0x0d
+#define DRM_VIA_DMA_BLIT 0x0e
+#define DRM_VIA_BLIT_SYNC 0x0f
+
+#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
+#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
+#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
+#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
+#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
+#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
+#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
+#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
+#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
+#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
+#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
+ drm_via_cmdbuf_size_t)
+#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
+#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
+#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+#define VIA_TEX_SETUP_SIZE 8
+
+/* Flags for clear ioctl
+ */
+#define VIA_FRONT 0x1
+#define VIA_BACK 0x2
+#define VIA_DEPTH 0x4
+#define VIA_STENCIL 0x8
+#define VIA_MEM_VIDEO 0 /* matches drm constant */
+#define VIA_MEM_AGP 1 /* matches drm constant */
+#define VIA_MEM_SYSTEM 2
+#define VIA_MEM_MIXED 3
+#define VIA_MEM_UNKNOWN 4
+
+typedef struct {
+ __u32 offset;
+ __u32 size;
+} drm_via_agp_t;
+
+typedef struct {
+ __u32 offset;
+ __u32 size;
+} drm_via_fb_t;
+
+typedef struct {
+ __u32 context;
+ __u32 type;
+ __u32 size;
+ unsigned long index;
+ unsigned long offset;
+} drm_via_mem_t;
+
+typedef struct _drm_via_init {
+ enum {
+ VIA_INIT_MAP = 0x01,
+ VIA_CLEANUP_MAP = 0x02
+ } func;
+
+ unsigned long sarea_priv_offset;
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long agpAddr;
+} drm_via_init_t;
+
+typedef struct _drm_via_futex {
+ enum {
+ VIA_FUTEX_WAIT = 0x00,
+ VIA_FUTEX_WAKE = 0x01
+ } func;
+ __u32 ms;
+ __u32 lock;
+ __u32 val;
+} drm_via_futex_t;
+
+typedef struct _drm_via_dma_init {
+ enum {
+ VIA_INIT_DMA = 0x01,
+ VIA_CLEANUP_DMA = 0x02,
+ VIA_DMA_INITIALIZED = 0x03
+ } func;
+
+ unsigned long offset;
+ unsigned long size;
+ unsigned long reg_pause_addr;
+} drm_via_dma_init_t;
+
+typedef struct _drm_via_cmdbuffer {
+ char *buf;
+ unsigned long size;
+} drm_via_cmdbuffer_t;
+
+/* Warning: If you change the SAREA structure you must change the Xserver
+ * structure as well */
+
+typedef struct _drm_via_tex_region {
+ unsigned char next, prev; /* indices to form a circular LRU */
+ unsigned char inUse; /* owned by a client, or free? */
+ int age; /* tracked by clients to update local LRU's */
+} drm_via_tex_region_t;
+
+typedef struct _drm_via_sarea {
+ unsigned int dirty;
+ unsigned int nbox;
+ struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
+ drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
+ int texAge; /* last time texture was uploaded */
+ int ctxOwner; /* last context to upload state */
+ int vertexPrim;
+
+ /*
+ * Below is for XvMC.
+ * We want the lock integers alone on, and aligned to, a cache line.
+ * Therefore this somewhat strange construct.
+ */
+
+ char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
+
+ unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
+ unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
+ unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
+
+ /* Used by the 3d driver only at this point, for pageflipping:
+ */
+ unsigned int pfCurrentOffset;
+} drm_via_sarea_t;
+
+typedef struct _drm_via_cmdbuf_size {
+ enum {
+ VIA_CMDBUF_SPACE = 0x01,
+ VIA_CMDBUF_LAG = 0x02
+ } func;
+ int wait;
+ __u32 size;
+} drm_via_cmdbuf_size_t;
+
+typedef enum {
+ VIA_IRQ_ABSOLUTE = 0x0,
+ VIA_IRQ_RELATIVE = 0x1,
+ VIA_IRQ_SIGNAL = 0x10000000,
+ VIA_IRQ_FORCE_SEQUENCE = 0x20000000
+} via_irq_seq_type_t;
+
+#define VIA_IRQ_FLAGS_MASK 0xF0000000
+
+enum drm_via_irqs {
+ drm_via_irq_hqv0 = 0,
+ drm_via_irq_hqv1,
+ drm_via_irq_dma0_dd,
+ drm_via_irq_dma0_td,
+ drm_via_irq_dma1_dd,
+ drm_via_irq_dma1_td,
+ drm_via_irq_num
+};
+
+struct drm_via_wait_irq_request {
+ unsigned irq;
+ via_irq_seq_type_t type;
+ __u32 sequence;
+ __u32 signal;
+};
+
+typedef union drm_via_irqwait {
+ struct drm_via_wait_irq_request request;
+ struct drm_wait_vblank_reply reply;
+} drm_via_irqwait_t;
+
+typedef struct drm_via_blitsync {
+ __u32 sync_handle;
+ unsigned engine;
+} drm_via_blitsync_t;
+
+/* - * Below,"flags" is currently unused but will be used for possible future
+ * extensions like kernel space bounce buffers for bad alignments and
+ * blit engine busy-wait polling for better latency in the absence of
+ * interrupts.
+ */
+
+typedef struct drm_via_dmablit {
+ __u32 num_lines;
+ __u32 line_length;
+
+ __u32 fb_addr;
+ __u32 fb_stride;
+
+ unsigned char *mem_addr;
+ __u32 mem_stride;
+
+ __u32 flags;
+ int to_fb;
+
+ drm_via_blitsync_t sync;
+} drm_via_dmablit_t;
+
+#endif /* _VIA_DRM_H_ */
diff --git a/chromium/third_party/libdrm/src/include/drm/virtgpu_drm.h b/chromium/third_party/libdrm/src/include/drm/virtgpu_drm.h
new file mode 100644
index 00000000000..91a31ffed82
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/virtgpu_drm.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRTGPU_DRM_H
+#define VIRTGPU_DRM_H
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers; use __u64 instead for 32-bit / 64-bit user/kernel
+ * compatibility. Keep fields aligned to their size.
+ */
+
+#define DRM_VIRTGPU_MAP 0x01
+#define DRM_VIRTGPU_EXECBUFFER 0x02
+#define DRM_VIRTGPU_GETPARAM 0x03
+#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
+#define DRM_VIRTGPU_RESOURCE_INFO 0x05
+#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
+#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
+#define DRM_VIRTGPU_WAIT 0x08
+#define DRM_VIRTGPU_GET_CAPS 0x09
+
+struct drm_virtgpu_map {
+ __u64 offset; /* use for mmap system call */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_virtgpu_execbuffer {
+ __u32 flags; /* for future use */
+ __u32 size;
+ __u64 command; /* void* */
+ __u64 bo_handles;
+ __u32 num_bo_handles;
+ __u32 pad;
+};
+
+#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+
+struct drm_virtgpu_getparam {
+ __u64 param;
+ __u64 value;
+};
+
+/* NO_BO flags? NO resource flag? */
+/* resource flag for y_0_top */
+struct drm_virtgpu_resource_create {
+ __u32 target;
+ __u32 format;
+ __u32 bind;
+ __u32 width;
+ __u32 height;
+ __u32 depth;
+ __u32 array_size;
+ __u32 last_level;
+ __u32 nr_samples;
+ __u32 flags;
+ __u32 bo_handle; /* if this is set, create the new resource attached to this bo? */
+ __u32 res_handle; /* returned by kernel */
+ __u32 size; /* validate transfer in the host */
+ __u32 stride; /* validate transfer in the host */
+};
+
+struct drm_virtgpu_resource_info {
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u32 size;
+ __u32 stride;
+};
+
+struct drm_virtgpu_3d_box {
+ __u32 x;
+ __u32 y;
+ __u32 z;
+ __u32 w;
+ __u32 h;
+ __u32 d;
+};
+
+struct drm_virtgpu_3d_transfer_to_host {
+ __u32 bo_handle;
+ struct drm_virtgpu_3d_box box;
+ __u32 level;
+ __u32 offset;
+};
+
+struct drm_virtgpu_3d_transfer_from_host {
+ __u32 bo_handle;
+ struct drm_virtgpu_3d_box box;
+ __u32 level;
+ __u32 offset;
+};
+
+#define VIRTGPU_WAIT_NOWAIT 1 /* poll only, do not block */
+struct drm_virtgpu_3d_wait {
+ __u32 handle; /* 0 is an invalid handle */
+ __u32 flags;
+};
+
+struct drm_virtgpu_get_caps {
+ __u32 cap_set_id;
+ __u32 cap_set_ver;
+ __u64 addr;
+ __u32 size;
+ __u32 pad;
+};
+
+#define DRM_IOCTL_VIRTGPU_MAP \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
+
+#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+ struct drm_virtgpu_execbuffer)
+
+#define DRM_IOCTL_VIRTGPU_GETPARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
+ struct drm_virtgpu_getparam)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
+ struct drm_virtgpu_resource_create)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
+ struct drm_virtgpu_resource_info)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
+ struct drm_virtgpu_3d_transfer_from_host)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
+ struct drm_virtgpu_3d_transfer_to_host)
+
+#define DRM_IOCTL_VIRTGPU_WAIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
+ struct drm_virtgpu_3d_wait)
+
+#define DRM_IOCTL_VIRTGPU_GET_CAPS \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
+ struct drm_virtgpu_get_caps)
+
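+/* Illustrative sketch, not part of the upstream header: probing for virgl 3D
+ * support before touching the EXECBUFFER/RESOURCE paths. Helper name is
+ * hypothetical; as in Mesa's virgl winsys, .value carries a user pointer
+ * packed into the 64-bit field, through which the kernel writes the result.
+ */
+static int virtgpu_has_3d(int fd)
+{
+ int features = 0;
+ struct drm_virtgpu_getparam gp = {
+  .param = VIRTGPU_PARAM_3D_FEATURES,
+  .value = (uintptr_t)&features, /* needs <stdint.h> */
+ };
+
+ if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp))
+  return 0;
+ return features != 0;
+}
+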
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/chromium/third_party/libdrm/src/include/drm/vmwgfx_drm.h b/chromium/third_party/libdrm/src/include/drm/vmwgfx_drm.h
new file mode 100644
index 00000000000..5b68b4d1088
--- /dev/null
+++ b/chromium/third_party/libdrm/src/include/drm/vmwgfx_drm.h
@@ -0,0 +1,1090 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef __VMWGFX_DRM_H__
+#define __VMWGFX_DRM_H__
+
+#include "drm.h"
+
+#define DRM_VMW_MAX_SURFACE_FACES 6
+#define DRM_VMW_MAX_MIP_LEVELS 24
+
+
+#define DRM_VMW_GET_PARAM 0
+#define DRM_VMW_ALLOC_DMABUF 1
+#define DRM_VMW_UNREF_DMABUF 2
+#define DRM_VMW_CURSOR_BYPASS 3
+/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
+#define DRM_VMW_CONTROL_STREAM 4
+#define DRM_VMW_CLAIM_STREAM 5
+#define DRM_VMW_UNREF_STREAM 6
+/* guarded by DRM_VMW_PARAM_3D == 1 */
+#define DRM_VMW_CREATE_CONTEXT 7
+#define DRM_VMW_UNREF_CONTEXT 8
+#define DRM_VMW_CREATE_SURFACE 9
+#define DRM_VMW_UNREF_SURFACE 10
+#define DRM_VMW_REF_SURFACE 11
+#define DRM_VMW_EXECBUF 12
+#define DRM_VMW_GET_3D_CAP 13
+#define DRM_VMW_FENCE_WAIT 14
+#define DRM_VMW_FENCE_SIGNALED 15
+#define DRM_VMW_FENCE_UNREF 16
+#define DRM_VMW_FENCE_EVENT 17
+#define DRM_VMW_PRESENT 18
+#define DRM_VMW_PRESENT_READBACK 19
+#define DRM_VMW_UPDATE_LAYOUT 20
+#define DRM_VMW_CREATE_SHADER 21
+#define DRM_VMW_UNREF_SHADER 22
+#define DRM_VMW_GB_SURFACE_CREATE 23
+#define DRM_VMW_GB_SURFACE_REF 24
+#define DRM_VMW_SYNCCPU 25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GET_PARAM - get device information.
+ *
+ * DRM_VMW_PARAM_FIFO_OFFSET:
+ * Offset to use to map the first page of the FIFO read-only.
+ * The fifo is mapped using the mmap() system call on the drm device.
+ *
+ * DRM_VMW_PARAM_OVERLAY_IOCTL:
+ * Does the driver support the overlay ioctl.
+ */
+
+#define DRM_VMW_PARAM_NUM_STREAMS 0
+#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
+#define DRM_VMW_PARAM_3D 2
+#define DRM_VMW_PARAM_HW_CAPS 3
+#define DRM_VMW_PARAM_FIFO_CAPS 4
+#define DRM_VMW_PARAM_MAX_FB_SIZE 5
+#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
+#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
+#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
+#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
+#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
+#define DRM_VMW_PARAM_SCREEN_TARGET 11
+#define DRM_VMW_PARAM_DX 12
+
+/**
+ * enum drm_vmw_handle_type - handle type for ref ioctls
+ *
+ */
+enum drm_vmw_handle_type {
+ DRM_VMW_HANDLE_LEGACY = 0,
+ DRM_VMW_HANDLE_PRIME = 1
+};
+
+/**
+ * struct drm_vmw_getparam_arg
+ *
+ * @value: Returned value. //Out
+ * @param: Parameter to query. //In.
+ *
+ * Argument to the DRM_VMW_GET_PARAM Ioctl.
+ */
+
+struct drm_vmw_getparam_arg {
+ __u64 value;
+ __u32 param;
+ __u32 pad64;
+};
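Note that unlike virtgpu_drm.h above, this header only defines ioctl offsets (the DRM_VMW_* constants); callers compose the full ioctl numbers themselves. A sketch of querying a parameter, assuming an open vmwgfx DRM fd; the DRM_IOCTL_VMW_GET_PARAM macro and the vmw_getparam helper are assumptions here, built the way the kernel driver builds them:

    #include <stdint.h>
    #include <xf86drm.h>
    #include "vmwgfx_drm.h"

    /* Assumed ioctl number, mirroring the kernel driver's definition. */
    #define DRM_IOCTL_VMW_GET_PARAM                          \
            DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,   \
                     struct drm_vmw_getparam_arg)

    /* Illustrative helper: returns the value, or 0 on error. For
     * DRM_VMW_PARAM_3D, a nonzero value means the host supports 3D. */
    static uint64_t vmw_getparam(int fd, uint32_t param)
    {
            struct drm_vmw_getparam_arg arg = { .param = param };

            if (drmIoctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) != 0)
                    return 0;
            return arg.value;
    }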
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+
+/**
+ * struct drm_vmw_context_arg
+ *
+ * @cid: Device unique context ID.
+ *
+ * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
+ * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
+ */
+
+struct drm_vmw_context_arg {
+ __s32 cid;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_CONTEXT - Free a host context.
+ *
+ * Frees a global context id, and queues a destroy host command for the host.
+ * Does not wait for host completion. The context ID can be used directly
+ * in the command stream and shows up as the same context ID on the host.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_SURFACE - Create a host surface.
+ *
+ * Allocates a device unique surface id, and queues a create surface command
+ * for the host. Does not wait for host completion. The surface ID can be
+ * used directly in the command stream and shows up as the same surface
+ * ID on the host.
+ */
+
+/**
+ * struct drm_vmw_surface_create_req
+ *
+ * @flags: Surface flags as understood by the host.
+ * @format: Surface format as understood by the host.
+ * @mip_levels: Number of mip levels for each face.
+ * An unused face should have 0 encoded.
+ * @size_addr: Address of a user-space array of struct drm_vmw_size
+ * cast to an __u64 for 32-64 bit compatibility.
+ * The size of the array should equal the total number of mipmap levels.
+ * @shareable: Boolean whether other clients (as identified by file descriptors)
+ * may reference this surface.
+ * @scanout: Boolean whether the surface is intended to be used as a
+ * scanout.
+ *
+ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
+ * Output data from the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+struct drm_vmw_surface_create_req {
+ __u32 flags;
+ __u32 format;
+ __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ __u64 size_addr;
+ __s32 shareable;
+ __s32 scanout;
+};
+
+/**
+ * struct drm_vmw_surface_arg
+ *
+ * @sid: Surface id of created surface or surface to destroy or reference.
+ * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
+ *
+ * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
+ * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
+ * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+struct drm_vmw_surface_arg {
+ __s32 sid;
+ enum drm_vmw_handle_type handle_type;
+};
+
+/**
+ * struct drm_vmw_size
+ *
+ * @width - mip level width
+ * @height - mip level height
+ * @depth - mip level depth
+ *
+ * Description of a mip level.
+ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
+ */
+
+struct drm_vmw_size {
+ __u32 width;
+ __u32 height;
+ __u32 depth;
+ __u32 pad64;
+};
+
+/**
+ * union drm_vmw_surface_create_arg
+ *
+ * @rep: Output data as described above.
+ * @req: Input data as described above.
+ *
+ * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
+ */
+
+union drm_vmw_surface_create_arg {
+ struct drm_vmw_surface_arg rep;
+ struct drm_vmw_surface_create_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_REF_SURFACE - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given sid, as previously
+ * returned by the DRM_VMW_CREATE_SURFACE ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface ID in the command
+ * stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * in the DRM_VMW_CREATE_SURFACE ioctl.
+ */
+
+/**
+ * union drm_vmw_surface_reference_arg
+ *
+ * @rep: Output data as described above.
+ * @req: Input data as described above.
+ *
+ * Argument to the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+union drm_vmw_surface_reference_arg {
+ struct drm_vmw_surface_create_req rep;
+ struct drm_vmw_surface_arg req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
+ *
+ * Clears a reference previously put on a host surface. When all
+ * references are gone, including the one implicitly placed on creation,
+ * a destroy surface command will be queued for the host.
+ * Does not wait for completion.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_EXECBUF
+ *
+ * Submit a command buffer for execution on the host, and return a
+ * fence seqno that when signaled, indicates that the command buffer has
+ * executed.
+ */
+
+/**
+ * struct drm_vmw_execbuf_arg
+ *
+ * @commands: User-space address of a command buffer cast to an __u64.
+ * @command_size: Size in bytes of the command buffer.
+ * @throttle_us: Sleep until software is less than @throttle_us
+ * microseconds ahead of hardware. The driver may round this value
+ * to the nearest kernel tick.
+ * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
+ * __u64.
+ * @version: Allows expanding the execbuf ioctl parameters without breaking
+ * backwards compatibility, since user-space will always tell the kernel
+ * which version it uses.
+ * @flags: Execbuf flags. None currently.
+ *
+ * Argument to the DRM_VMW_EXECBUF Ioctl.
+ */
+
+#define DRM_VMW_EXECBUF_VERSION 2
+
+struct drm_vmw_execbuf_arg {
+ __u64 commands;
+ __u32 command_size;
+ __u32 throttle_us;
+ __u64 fence_rep;
+ __u32 version;
+ __u32 flags;
+ __u32 context_handle;
+ __u32 pad64;
+};
+
+/**
+ * struct drm_vmw_fence_rep
+ *
+ * @handle: Fence object handle for fence associated with a command submission.
+ * @mask: Fence flags relevant for this fence object.
+ * @seqno: Fence sequence number in fifo. A fence object with a lower
+ * seqno will signal the EXEC flag before a fence object with a higher
+ * seqno. This can be used by user-space to avoid kernel calls to determine
+ * whether a fence has signaled the EXEC flag. Note that @seqno will
+ * wrap at 32-bit.
+ * @passed_seqno: The highest seqno number processed by the hardware
+ * so far. This can be used to mark user-space fence objects as signaled, and
+ * to determine whether a fence seqno might be stale.
+ * @error: This member should be set to -EFAULT by user-space on submission.
+ * The following actions should be taken on completion:
+ * error == -EFAULT: Fence communication failed. The host is synchronized.
+ * Use the last fence id read from the FIFO fence register.
+ * error != 0 && error != -EFAULT:
+ * Fence submission failed. The host is synchronized. Use the fence_seq member.
+ * error == 0: All is OK, The host may not be synchronized.
+ * Use the fence_seq member.
+ *
+ * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
+ */
+
+struct drm_vmw_fence_rep {
+ __u32 handle;
+ __u32 mask;
+ __u32 seqno;
+ __u32 passed_seqno;
+ __u32 pad64;
+ __s32 error;
+};
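Tying the two structures above together, a submission sketch; the DRM_IOCTL_VMW_EXECBUF macro is an assumption mirroring the kernel driver, the vmw_submit helper is illustrative, and stdint.h/errno.h/xf86drm.h are assumed included as in the previous sketch:

    /* Assumed ioctl number, mirroring the kernel driver's definition. */
    #define DRM_IOCTL_VMW_EXECBUF                            \
            DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,      \
                    struct drm_vmw_execbuf_arg)

    /* Illustrative helper; fence may be NULL if no fence is wanted. */
    static int vmw_submit(int fd, const void *cmds, uint32_t size,
                          struct drm_vmw_fence_rep *fence)
    {
            struct drm_vmw_execbuf_arg arg = {
                    .commands = (uint64_t)(uintptr_t)cmds,
                    .command_size = size,
                    .fence_rep = (uint64_t)(uintptr_t)fence, /* may be 0 */
                    .version = DRM_VMW_EXECBUF_VERSION,
            };

            if (fence)
                    fence->error = -EFAULT; /* preset, per the note above */
            return drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
    }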
+
+/*************************************************************************/
+/**
+ * DRM_VMW_ALLOC_DMABUF
+ *
+ * Allocate a DMA buffer that is also visible to the host.
+ * NOTE: The buffer is identified by a handle and an offset, which are
+ * private to the guest, but usable in the command stream. The guest
+ * kernel may translate these
+ * and patch up the command stream accordingly. In the future, the offset may
+ * be zero at all times, or it may disappear from the interface before it is
+ * fixed.
+ *
+ * The DMA buffer may stay user-space mapped in the guest at all times,
+ * and is thus suitable for sub-allocation.
+ *
+ * DMA buffers are mapped using the mmap() syscall on the drm device.
+ */
+
+/**
+ * struct drm_vmw_alloc_dmabuf_req
+ *
+ * @size: Required minimum size of the buffer.
+ *
+ * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+struct drm_vmw_alloc_dmabuf_req {
+ __u32 size;
+ __u32 pad64;
+};
+
+/**
+ * struct drm_vmw_dmabuf_rep
+ *
+ * @map_handle: Offset to use in the mmap() call used to map the buffer.
+ * @handle: Handle unique to this buffer. Used for unreferencing.
+ * @cur_gmr_id: GMR id to use in the command stream when this buffer is
+ * referenced. See note above.
+ * @cur_gmr_offset: Offset to use in the command stream when this buffer is
+ * referenced. See note above.
+ *
+ * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+struct drm_vmw_dmabuf_rep {
+ __u64 map_handle;
+ __u32 handle;
+ __u32 cur_gmr_id;
+ __u32 cur_gmr_offset;
+ __u32 pad64;
+};
+
+/**
+ * union drm_vmw_alloc_dmabuf_arg
+ *
+ * @req: Input data as described above.
+ * @rep: Output data as described above.
+ *
+ * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+union drm_vmw_alloc_dmabuf_arg {
+ struct drm_vmw_alloc_dmabuf_req req;
+ struct drm_vmw_dmabuf_rep rep;
+};
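Since the note above says DMA buffers are mapped with mmap() on the drm fd, an allocate-and-map sketch might look like the following; the ioctl-number macro is again an assumption and vmw_alloc_and_map is an illustrative helper:

    #include <sys/mman.h>

    /* Assumed ioctl number, mirroring the kernel driver's definition. */
    #define DRM_IOCTL_VMW_ALLOC_DMABUF                       \
            DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
                     union drm_vmw_alloc_dmabuf_arg)

    static void *vmw_alloc_and_map(int fd, uint32_t size, uint32_t *handle)
    {
            union drm_vmw_alloc_dmabuf_arg arg = { .req = { .size = size } };
            void *ptr;

            if (drmIoctl(fd, DRM_IOCTL_VMW_ALLOC_DMABUF, &arg) != 0)
                    return NULL;
            *handle = arg.rep.handle;
            /* rep.map_handle is the fake mmap offset on the drm fd */
            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, arg.rep.map_handle);
            return ptr == MAP_FAILED ? NULL : ptr;
    }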
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
+ *
+ */
+
+/**
+ * struct drm_vmw_unref_dmabuf_arg
+ *
+ * @handle: Handle indicating what buffer to free. Obtained from the
+ * DRM_VMW_ALLOC_DMABUF Ioctl.
+ *
+ * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
+ */
+
+struct drm_vmw_unref_dmabuf_arg {
+ __u32 handle;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
+ *
+ * This IOCTL controls the overlay units of the svga device.
+ * The SVGA overlay units does not work like regular hardware units in
+ * that they do not automaticaly read back the contents of the given dma
+ * buffer. But instead only read back for each call to this ioctl, and
+ * at any point between this call being made and a following call that
+ * either changes the buffer or disables the stream.
+ */
+
+/**
+ * struct drm_vmw_rect
+ *
+ * Defines a rectangle. Used in the overlay ioctl to define
+ * source and destination rectangle.
+ */
+
+struct drm_vmw_rect {
+ __s32 x;
+ __s32 y;
+ __u32 w;
+ __u32 h;
+};
+
+/**
+ * struct drm_vmw_control_stream_arg
+ *
+ * @stream_id: Stream to control
+ * @enabled: If false all following arguments are ignored.
+ * @handle: Handle to buffer for getting data from.
+ * @format: Format of the overlay as understood by the host.
+ * @width: Width of the overlay.
+ * @height: Height of the overlay.
+ * @size: Size of the overlay in bytes.
+ * @pitch: Array of pitches; the last two are only used for YUV12 formats.
+ * @offset: Offset from start of dma buffer to overlay.
+ * @src: Source rect, must be within the defined area above.
+ * @dst: Destination rect, x and y may be negative.
+ *
+ * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
+ */
+
+struct drm_vmw_control_stream_arg {
+ __u32 stream_id;
+ __u32 enabled;
+
+ __u32 flags;
+ __u32 color_key;
+
+ __u32 handle;
+ __u32 offset;
+ __s32 format;
+ __u32 size;
+ __u32 width;
+ __u32 height;
+ __u32 pitch[3];
+
+ __u32 pad64;
+ struct drm_vmw_rect src;
+ struct drm_vmw_rect dst;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
+ *
+ */
+
+#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
+#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
+
+/**
+ * struct drm_vmw_cursor_bypass_arg
+ *
+ * @flags: Flags.
+ * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
+ * @xpos: X position of cursor.
+ * @ypos: Y position of cursor.
+ * @xhot: X hotspot.
+ * @yhot: Y hotspot.
+ *
+ * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
+ */
+
+struct drm_vmw_cursor_bypass_arg {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 xpos;
+ __s32 ypos;
+ __s32 xhot;
+ __s32 yhot;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CLAIM_STREAM - Claim a single stream.
+ */
+
+/**
+ * struct drm_vmw_stream_arg
+ *
+ * @stream_id: Device unique stream ID.
+ *
+ * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
+ * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
+ */
+
+struct drm_vmw_stream_arg {
+ __u32 stream_id;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_STREAM - Unclaim a stream.
+ *
+ * Returns a single stream that was claimed by this process. Also makes
+ * sure that the stream has been stopped.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GET_3D_CAP
+ *
+ * Read 3D capabilities from the FIFO
+ *
+ */
+
+/**
+ * struct drm_vmw_get_3d_cap_arg
+ *
+ * @buffer: Pointer to a buffer for capability data, cast to an __u64
+ * @max_size: Max size to copy
+ *
+ * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
+ */
+
+struct drm_vmw_get_3d_cap_arg {
+ __u64 buffer;
+ __u32 max_size;
+ __u32 pad64;
+};
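The buffer size is typically discovered first via DRM_VMW_PARAM_3D_CAPS_SIZE (see the getparam sketch earlier). A capability-read sketch; the ioctl number is again an assumption and vmw_get_3d_caps is an illustrative helper:

    /* Assumed ioctl number, mirroring the kernel driver's definition. */
    #define DRM_IOCTL_VMW_GET_3D_CAP                         \
            DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,   \
                    struct drm_vmw_get_3d_cap_arg)

    static int vmw_get_3d_caps(int fd, void *buf, uint32_t buf_size)
    {
            struct drm_vmw_get_3d_cap_arg arg = {
                    .buffer = (uint64_t)(uintptr_t)buf,
                    .max_size = buf_size, /* at most this much is copied */
            };
            return drmIoctl(fd, DRM_IOCTL_VMW_GET_3D_CAP, &arg);
    }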
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_WAIT
+ *
+ * Waits for a fence object to signal. The wait is interruptible, so that
+ * signals may be delivered during the wait. The wait may time out,
+ * in which case the call returns -EBUSY. If the wait is restarted,
+ * that is, restarted without resetting @cookie_valid to zero,
+ * the timeout is computed from the first call.
+ *
+ * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
+ * on:
+ * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
+ * stream have executed.
+ * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
+ * commands in the buffer given to the EXECBUF ioctl returning the fence
+ * object handle are available to user-space.
+ *
+ * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
+ * fence wait ioctl returns 0, the fence object has been unreferenced after
+ * the wait.
+ */
+
+#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
+#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)
+
+#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
+
+/**
+ * struct drm_vmw_fence_wait_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
+ * @kernel_cookie: Set to 0 on first call. Left alone on restart.
+ * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
+ * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
+ * before returning.
+ * @flags: Fence flags to wait on.
+ * @wait_options: Options that control the behaviour of the wait ioctl.
+ *
+ * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
+ */
+
+struct drm_vmw_fence_wait_arg {
+ __u32 handle;
+ __s32 cookie_valid;
+ __u64 kernel_cookie;
+ __u64 timeout_us;
+ __s32 lazy;
+ __s32 flags;
+ __s32 wait_options;
+ __s32 pad64;
+};
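A wait sketch; note that libdrm's drmIoctl() itself restarts on EINTR, and leaving @cookie_valid and @kernel_cookie untouched across those restarts is what keeps the timeout anchored to the first call. The ioctl-number macro is an assumption and vmw_fence_wait is an illustrative helper:

    /* Assumed ioctl number, mirroring the kernel driver's definition. */
    #define DRM_IOCTL_VMW_FENCE_WAIT                         \
            DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,  \
                     struct drm_vmw_fence_wait_arg)

    static int vmw_fence_wait(int fd, uint32_t handle, uint64_t timeout_us)
    {
            struct drm_vmw_fence_wait_arg arg = {
                    .handle = handle,
                    .cookie_valid = 0,  /* must be 0 on the first call */
                    .timeout_us = timeout_us,
                    .lazy = 1,          /* timing not critical here */
                    .flags = DRM_VMW_FENCE_FLAG_EXEC,
            };
            return drmIoctl(fd, DRM_IOCTL_VMW_FENCE_WAIT, &arg);
    }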
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_SIGNALED
+ *
+ * Checks if a fence object is signaled.
+ */
+
+/**
+ * struct drm_vmw_fence_signaled_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @flags: Fence object flags input to the DRM_VMW_FENCE_SIGNALED ioctl.
+ * @signaled: Out: Whether the fence object has signaled.
+ * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
+ * EXEC flag of user-space fence objects.
+ * @signaled_flags: Out: Flags signaled.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
+ */
+
+struct drm_vmw_fence_signaled_arg {
+ __u32 handle;
+ __u32 flags;
+ __s32 signaled;
+ __u32 passed_seqno;
+ __u32 signaled_flags;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_UNREF
+ *
+ * Unreferences a fence object, and causes it to be destroyed if there are no
+ * other references to it.
+ *
+ */
+
+/**
+ * struct drm_vmw_fence_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
+ */
+
+struct drm_vmw_fence_arg {
+ __u32 handle;
+ __u32 pad64;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_EVENT
+ *
+ * Queues an event on a fence to be delivered on the drm character device
+ * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
+ * Optionally the approximate time when the fence signaled is
+ * given by the event.
+ */
+
+/*
+ * The event type
+ */
+#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
+
+struct drm_vmw_event_fence {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+};
+
+/*
+ * Flags that may be given to the command.
+ */
+/* Request fence signaled time on the event. */
+#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
+
+/**
+ * struct drm_vmw_fence_event_arg
+ *
+ * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
+ * the fence is not supposed to be referenced by user-space.
+ * @user_data: Data to be delivered with the event.
+ * @handle: Attach the event to this fence only.
+ * @flags: A set of flags as defined above.
+ */
+struct drm_vmw_fence_event_arg {
+ __u64 fence_rep;
+ __u64 user_data;
+ __u32 handle;
+ __u32 flags;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT
+ *
+ * Executes an SVGA present on a given fb for a given surface. The surface
+ * is placed on the framebuffer. Cliprects are given relative to the given
+ * point (the point designated by dest_{x|y}).
+ *
+ */
+
+/**
+ * struct drm_vmw_present_arg
+ * @fb_id: framebuffer id to present / read back from.
+ * @sid: Surface id to present from.
+ * @dest_x: X placement coordinate for surface.
+ * @dest_y: Y placement coordinate for surface.
+ * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
+ * @num_clips: Number of cliprects given relative to the framebuffer origin,
+ * in the same coordinate space as the frame buffer.
+ * @pad64: Unused 64-bit padding.
+ *
+ * Input argument to the DRM_VMW_PRESENT ioctl.
+ */
+
+struct drm_vmw_present_arg {
+ __u32 fb_id;
+ __u32 sid;
+ __s32 dest_x;
+ __s32 dest_y;
+ __u64 clips_ptr;
+ __u32 num_clips;
+ __u32 pad64;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT_READBACK
+ *
+ * Executes an SVGA present readback from a given fb to the dma buffer
+ * currently bound as the fb. If there is no dma buffer bound to the fb,
+ * an error will be returned.
+ *
+ */
+
+/**
+ * struct drm_vmw_present_readback_arg
+ * @fb_id: fb_id to present / read back from.
+ * @num_clips: Number of cliprects.
+ * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
+ * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
+ * If this member is 0, the ioctl will not return a fence.
+ */
+
+struct drm_vmw_present_readback_arg {
+ __u32 fb_id;
+ __u32 num_clips;
+ __u64 clips_ptr;
+ __u64 fence_rep;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UPDATE_LAYOUT - Update layout
+ *
+ * Updates the preferred modes and connection status for connectors. The
+ * command consists of one drm_vmw_update_layout_arg pointing to an array
+ * of num_outputs drm_vmw_rect's.
+ */
+
+/**
+ * struct drm_vmw_update_layout_arg
+ *
+ * @num_outputs: number of active connectors
+ * @rects: pointer to array of drm_vmw_rect cast to an __u64
+ *
+ * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
+ */
+struct drm_vmw_update_layout_arg {
+ __u32 num_outputs;
+ __u32 pad64;
+ __u64 rects;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_SHADER - Create shader
+ *
+ * Creates a shader and optionally binds it to a dma buffer containing
+ * the shader byte-code.
+ */
+
+/**
+ * enum drm_vmw_shader_type - Shader types
+ */
+enum drm_vmw_shader_type {
+ drm_vmw_shader_type_vs = 0,
+ drm_vmw_shader_type_ps,
+};
+
+
+/**
+ * struct drm_vmw_shader_create_arg
+ *
+ * @shader_type: Shader type of the shader to create.
+ * @size: Size of the byte-code in bytes.
+ * @buffer_handle: Buffer handle identifying the buffer containing the
+ * shader byte-code.
+ * @shader_handle: On successful completion contains a handle that
+ * can be used to subsequently identify the shader.
+ * @offset: Offset in bytes into the buffer given by @buffer_handle,
+ * where the shader byte-code starts.
+ *
+ * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
+ */
+struct drm_vmw_shader_create_arg {
+ enum drm_vmw_shader_type shader_type;
+ __u32 size;
+ __u32 buffer_handle;
+ __u32 shader_handle;
+ __u64 offset;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_SHADER - Unreferences a shader
+ *
+ * Drops a user-space reference to a shader, destroying the shader itself
+ * when the last reference is gone.
+ */
+
+/**
+ * struct drm_vmw_shader_arg
+ *
+ * @handle: Handle identifying the shader to destroy.
+ *
+ * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
+ */
+struct drm_vmw_shader_arg {
+ __u32 handle;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ */
+
+/**
+ * enum drm_vmw_surface_flags
+ *
+ * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
+ * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
+ * surface.
+ * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
+ * given.
+ */
+enum drm_vmw_surface_flags {
+ drm_vmw_surface_flag_shareable = (1 << 0),
+ drm_vmw_surface_flag_scanout = (1 << 1),
+ drm_vmw_surface_flag_create_buffer = (1 << 2)
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_req
+ *
+ * @svga3d_flags: SVGA3d surface flags for the device.
+ * @format: SVGA3d format.
+ * @mip_levels: Number of mip levels for all faces.
+ * @drm_surface_flags: Flags as described above.
+ * @multisample_count: Future use. Set to 0.
+ * @autogen_filter: Future use. Set to 0.
+ * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
+ * if none.
+ * @base_size: Size of the base mip level for all faces.
+ * @array_size: Must be zero for non-DX hardware, and if non-zero
+ * svga3d_flags must have proper bind flags setup.
+ *
+ * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+struct drm_vmw_gb_surface_create_req {
+ __u32 svga3d_flags;
+ __u32 format;
+ __u32 mip_levels;
+ enum drm_vmw_surface_flags drm_surface_flags;
+ __u32 multisample_count;
+ __u32 autogen_filter;
+ __u32 buffer_handle;
+ __u32 array_size;
+ struct drm_vmw_size base_size;
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_rep
+ *
+ * @handle: Surface handle.
+ * @backup_size: Size of backup buffers for this surface.
+ * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
+ * @buffer_size: Actual size of the buffer identified by
+ * @buffer_handle
+ * @buffer_map_handle: Offset into device address space for the buffer
+ * identified by @buffer_handle.
+ *
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
+ * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+struct drm_vmw_gb_surface_create_rep {
+ __u32 handle;
+ __u32 backup_size;
+ __u32 buffer_handle;
+ __u32 buffer_size;
+ __u64 buffer_map_handle;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+union drm_vmw_gb_surface_create_arg {
+ struct drm_vmw_gb_surface_create_rep rep;
+ struct drm_vmw_gb_surface_create_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+
+/**
+ * struct drm_vmw_gb_surface_reference_arg
+ *
+ * @creq: The data used as input when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_req"
+ * @crep: Additional data output when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
+ */
+struct drm_vmw_gb_surface_ref_rep {
+ struct drm_vmw_gb_surface_create_req creq;
+ struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+union drm_vmw_gb_surface_reference_arg {
+ struct drm_vmw_gb_surface_ref_rep rep;
+ struct drm_vmw_surface_arg req;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
+ *
+ * Idles any previously submitted GPU operations on the buffer and
+ * by default blocks command submissions that reference the buffer.
+ * If the file descriptor used to grab a blocking CPU sync is closed, the
+ * cpu sync is released.
+ * The flags argument indicates how the grab / release operation should be
+ * performed:
+ */
+
+/**
+ * enum drm_vmw_synccpu_flags - Synccpu flags:
+ *
+ * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
+ * hint to the kernel to allow command submissions that reference the buffer
+ * read-only.
+ * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
+ * referencing this buffer.
+ * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
+ * -EBUSY should the buffer be busy.
+ * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
+ * while the buffer is synced for CPU. This is similar to the GEM bo idle
+ * behavior.
+ */
+enum drm_vmw_synccpu_flags {
+ drm_vmw_synccpu_read = (1 << 0),
+ drm_vmw_synccpu_write = (1 << 1),
+ drm_vmw_synccpu_dontblock = (1 << 2),
+ drm_vmw_synccpu_allow_cs = (1 << 3)
+};
+
+/**
+ * enum drm_vmw_synccpu_op - Synccpu operations:
+ *
+ * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
+ * @drm_vmw_synccpu_release: Release a previous grab.
+ */
+enum drm_vmw_synccpu_op {
+ drm_vmw_synccpu_grab,
+ drm_vmw_synccpu_release
+};
+
+/**
+ * struct drm_vmw_synccpu_arg
+ *
+ * @op: The synccpu operation as described above.
+ * @handle: Handle identifying the buffer object.
+ * @flags: Flags as described above.
+ */
+struct drm_vmw_synccpu_arg {
+ enum drm_vmw_synccpu_op op;
+ enum drm_vmw_synccpu_flags flags;
+ __u32 handle;
+ __u32 pad64;
+};
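The grab/release pairing around direct CPU access might look like the sketch below; the ioctl-number macro is an assumption mirroring the kernel driver, and vmw_synccpu is an illustrative helper:

    /* Assumed ioctl number, mirroring the kernel driver's definition. */
    #define DRM_IOCTL_VMW_SYNCCPU                            \
            DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,      \
                    struct drm_vmw_synccpu_arg)

    static int vmw_synccpu(int fd, uint32_t handle,
                           enum drm_vmw_synccpu_op op,
                           enum drm_vmw_synccpu_flags flags)
    {
            struct drm_vmw_synccpu_arg arg = {
                    .op = op, .flags = flags, .handle = handle,
            };
            return drmIoctl(fd, DRM_IOCTL_VMW_SYNCCPU, &arg);
    }

    /* Typical write pattern:
     *   vmw_synccpu(fd, h, drm_vmw_synccpu_grab,    drm_vmw_synccpu_write);
     *   ... CPU writes through the mapping ...
     *   vmw_synccpu(fd, h, drm_vmw_synccpu_release, drm_vmw_synccpu_write);
     */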
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+ drm_vmw_context_legacy,
+ drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+ enum drm_vmw_extended_context req;
+ struct drm_vmw_context_arg rep;
+};
+#endif
diff --git a/chromium/third_party/libdrm/src/intel/Android.mk b/chromium/third_party/libdrm/src/intel/Android.mk
new file mode 100644
index 00000000000..838a93b3b7f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/Android.mk
@@ -0,0 +1,43 @@
+#
+# Copyright © 2011 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Import variables LIBDRM_INTEL_FILES, LIBDRM_INTEL_H_FILES
+include $(LOCAL_PATH)/Makefile.sources
+
+LOCAL_MODULE := libdrm_intel
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SRC_FILES := $(patsubst %.h, , $(LIBDRM_INTEL_FILES))
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+
+LOCAL_CFLAGS := \
+ -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
+
+LOCAL_SHARED_LIBRARIES := \
+ libdrm \
+ libpciaccess
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/chromium/third_party/libdrm/src/intel/Makefile.am b/chromium/third_party/libdrm/src/intel/Makefile.am
new file mode 100644
index 00000000000..c52e8c086a8
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/Makefile.am
@@ -0,0 +1,73 @@
+# Copyright © 2008 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+# Eric Anholt <eric@anholt.net>
+
+include Makefile.sources
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ $(PTHREADSTUBS_CFLAGS) \
+ $(PCIACCESS_CFLAGS) \
+ $(VALGRIND_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_intel_la_LTLIBRARIES = libdrm_intel.la
+libdrm_intel_ladir = $(libdir)
+libdrm_intel_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libdrm_intel_la_LIBADD = ../libdrm.la \
+ @PTHREADSTUBS_LIBS@ \
+ @PCIACCESS_LIBS@ \
+ @CLOCK_LIB@
+
+libdrm_intel_la_SOURCES = $(LIBDRM_INTEL_FILES)
+
+libdrm_intelincludedir = ${includedir}/libdrm
+libdrm_intelinclude_HEADERS = $(LIBDRM_INTEL_H_FILES)
+
+# This may be interesting even outside of "make check", due to the -dump option.
+noinst_PROGRAMS = test_decode
+
+BATCHES = \
+ tests/gen4-3d.batch \
+ tests/gm45-3d.batch \
+ tests/gen5-3d.batch \
+ tests/gen6-3d.batch \
+ tests/gen7-2d-copy.batch \
+ tests/gen7-3d.batch
+
+TESTS = \
+ $(BATCHES:.batch=.batch.sh) \
+ intel-symbol-check
+
+EXTRA_DIST = \
+ $(BATCHES) \
+ $(BATCHES:.batch=.batch.sh) \
+	$(BATCHES:.batch=.batch-ref.txt) \
+ tests/test-batch.sh \
+ $(TESTS)
+
+test_decode_LDADD = libdrm_intel.la ../libdrm.la
+
+pkgconfig_DATA = libdrm_intel.pc
diff --git a/chromium/third_party/libdrm/src/intel/Makefile.sources b/chromium/third_party/libdrm/src/intel/Makefile.sources
new file mode 100644
index 00000000000..7b2272c7598
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/Makefile.sources
@@ -0,0 +1,14 @@
+LIBDRM_INTEL_FILES := \
+ intel_bufmgr.c \
+ intel_bufmgr_priv.h \
+ intel_bufmgr_fake.c \
+ intel_bufmgr_gem.c \
+ intel_decode.c \
+ intel_chipset.h \
+ mm.c \
+ mm.h
+
+LIBDRM_INTEL_H_FILES := \
+ intel_bufmgr.h \
+ intel_aub.h \
+ intel_debug.h
diff --git a/chromium/third_party/libdrm/src/intel/intel-symbol-check b/chromium/third_party/libdrm/src/intel/intel-symbol-check
new file mode 100755
index 00000000000..bde7634c321
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel-symbol-check
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+# The following symbols (past the first five) are taken from the public headers.
+# A list of the latter is available in Makefile.sources (LIBDRM_INTEL_H_FILES).
+
+FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_intel.so} | awk '{print $3}' | while read func; do
+( grep -q "^$func$" || echo $func ) <<EOF
+__bss_start
+_edata
+_end
+_fini
+_init
+drm_intel_bo_alloc
+drm_intel_bo_alloc_for_render
+drm_intel_bo_alloc_tiled
+drm_intel_bo_alloc_userptr
+drm_intel_bo_busy
+drm_intel_bo_disable_reuse
+drm_intel_bo_emit_reloc
+drm_intel_bo_emit_reloc_fence
+drm_intel_bo_exec
+drm_intel_bo_fake_alloc_static
+drm_intel_bo_fake_disable_backing_store
+drm_intel_bo_flink
+drm_intel_bo_gem_create_from_name
+drm_intel_bo_gem_create_from_prime
+drm_intel_bo_gem_export_to_prime
+drm_intel_bo_get_subdata
+drm_intel_bo_get_tiling
+drm_intel_bo_is_reusable
+drm_intel_bo_madvise
+drm_intel_bo_map
+drm_intel_bo_mrb_exec
+drm_intel_bo_pin
+drm_intel_bo_reference
+drm_intel_bo_references
+drm_intel_bo_set_softpin_offset
+drm_intel_bo_set_tiling
+drm_intel_bo_subdata
+drm_intel_bo_unmap
+drm_intel_bo_unpin
+drm_intel_bo_unreference
+drm_intel_bo_use_48b_address_range
+drm_intel_bo_wait_rendering
+drm_intel_bufmgr_check_aperture_space
+drm_intel_bufmgr_destroy
+drm_intel_bufmgr_fake_contended_lock_take
+drm_intel_bufmgr_fake_evict_all
+drm_intel_bufmgr_fake_init
+drm_intel_bufmgr_fake_set_exec_callback
+drm_intel_bufmgr_fake_set_fence_callback
+drm_intel_bufmgr_fake_set_last_dispatch
+drm_intel_bufmgr_gem_enable_fenced_relocs
+drm_intel_bufmgr_gem_enable_reuse
+drm_intel_bufmgr_gem_get_devid
+drm_intel_bufmgr_gem_init
+drm_intel_bufmgr_gem_set_aub_annotations
+drm_intel_bufmgr_gem_set_aub_dump
+drm_intel_bufmgr_gem_set_aub_filename
+drm_intel_bufmgr_gem_set_vma_cache_size
+drm_intel_bufmgr_set_debug
+drm_intel_decode
+drm_intel_decode_context_alloc
+drm_intel_decode_context_free
+drm_intel_decode_set_batch_pointer
+drm_intel_decode_set_dump_past_end
+drm_intel_decode_set_head_tail
+drm_intel_decode_set_output_file
+drm_intel_gem_bo_aub_dump_bmp
+drm_intel_gem_bo_clear_relocs
+drm_intel_gem_bo_context_exec
+drm_intel_gem_bo_get_reloc_count
+drm_intel_gem_bo_map_gtt
+drm_intel_gem_bo_map_unsynchronized
+drm_intel_gem_bo_start_gtt_access
+drm_intel_gem_bo_unmap_gtt
+drm_intel_gem_bo_wait
+drm_intel_gem_context_create
+drm_intel_gem_context_destroy
+drm_intel_get_aperture_sizes
+drm_intel_get_eu_total
+drm_intel_get_pipe_from_crtc_id
+drm_intel_get_reset_stats
+drm_intel_get_subslice_total
+drm_intel_reg_read
+EOF
+done)
+
+test ! -n "$FUNCS" || echo $FUNCS
+test ! -n "$FUNCS"
diff --git a/chromium/third_party/libdrm/src/intel/intel_aub.h b/chromium/third_party/libdrm/src/intel/intel_aub.h
new file mode 100644
index 00000000000..5f0aba8e68e
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel_aub.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file intel_aub.h
+ *
+ * The AUB file is a file format used by Intel's internal simulation
+ * and other validation tools. It can be used at various levels by a
+ * driver to input state to the simulated hardware or a replaying
+ * debugger.
+ *
+ * We choose to dump AUB files using the trace block format for ease
+ * of implementation -- dump out the blocks of memory as plain blobs
+ * and insert ring commands to execute the batchbuffer blob.
+ */
+
+#ifndef _INTEL_AUB_H
+#define _INTEL_AUB_H
+
+#define AUB_MI_NOOP (0)
+#define AUB_MI_BATCH_BUFFER_START (0x31 << 23)
+#define AUB_PIPE_CONTROL (0x7a000002)
+
+/* DW0: instruction type. */
+
+#define CMD_AUB (7 << 29)
+
+#define CMD_AUB_HEADER (CMD_AUB | (1 << 23) | (0x05 << 16))
+/* DW1 */
+# define AUB_HEADER_MAJOR_SHIFT 24
+# define AUB_HEADER_MINOR_SHIFT 16
+
+#define CMD_AUB_TRACE_HEADER_BLOCK (CMD_AUB | (1 << 23) | (0x41 << 16))
+#define CMD_AUB_DUMP_BMP (CMD_AUB | (1 << 23) | (0x9e << 16))
+
+/* DW1 */
+#define AUB_TRACE_OPERATION_MASK 0x000000ff
+#define AUB_TRACE_OP_COMMENT 0x00000000
+#define AUB_TRACE_OP_DATA_WRITE 0x00000001
+#define AUB_TRACE_OP_COMMAND_WRITE 0x00000002
+#define AUB_TRACE_OP_MMIO_WRITE 0x00000003
+// operation = TRACE_DATA_WRITE, Type =
+#define AUB_TRACE_TYPE_MASK 0x0000ff00
+#define AUB_TRACE_TYPE_NOTYPE (0 << 8)
+#define AUB_TRACE_TYPE_BATCH (1 << 8)
+#define AUB_TRACE_TYPE_VERTEX_BUFFER (5 << 8)
+#define AUB_TRACE_TYPE_2D_MAP (6 << 8)
+#define AUB_TRACE_TYPE_CUBE_MAP (7 << 8)
+#define AUB_TRACE_TYPE_VOLUME_MAP (9 << 8)
+#define AUB_TRACE_TYPE_1D_MAP (10 << 8)
+#define AUB_TRACE_TYPE_CONSTANT_BUFFER (11 << 8)
+#define AUB_TRACE_TYPE_CONSTANT_URB (12 << 8)
+#define AUB_TRACE_TYPE_INDEX_BUFFER (13 << 8)
+#define AUB_TRACE_TYPE_GENERAL (14 << 8)
+#define AUB_TRACE_TYPE_SURFACE (15 << 8)
+
+
+// operation = TRACE_COMMAND_WRITE, Type =
+#define AUB_TRACE_TYPE_RING_HWB (1 << 8)
+#define AUB_TRACE_TYPE_RING_PRB0 (2 << 8)
+#define AUB_TRACE_TYPE_RING_PRB1 (3 << 8)
+#define AUB_TRACE_TYPE_RING_PRB2 (4 << 8)
+
+// Address space
+#define AUB_TRACE_ADDRESS_SPACE_MASK 0x00ff0000
+#define AUB_TRACE_MEMTYPE_GTT (0 << 16)
+#define AUB_TRACE_MEMTYPE_LOCAL (1 << 16)
+#define AUB_TRACE_MEMTYPE_NONLOCAL (2 << 16)
+#define AUB_TRACE_MEMTYPE_PCI (3 << 16)
+#define AUB_TRACE_MEMTYPE_GTT_ENTRY (4 << 16)
+
+/* DW2 */
+
+/**
+ * aub_state_struct_type enum values are encoded with the top 16 bits
+ * representing the type to be delivered to the .aub file, and the bottom 16
+ * bits representing the subtype. This macro performs the encoding.
+ */
+#define ENCODE_SS_TYPE(type, subtype) (((type) << 16) | (subtype))
+
+enum aub_state_struct_type {
+ AUB_TRACE_VS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 1),
+ AUB_TRACE_GS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 2),
+ AUB_TRACE_CLIP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 3),
+ AUB_TRACE_SF_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 4),
+ AUB_TRACE_WM_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 5),
+ AUB_TRACE_CC_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 6),
+ AUB_TRACE_CLIP_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 7),
+ AUB_TRACE_SF_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 8),
+ AUB_TRACE_CC_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x9),
+ AUB_TRACE_SAMPLER_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xa),
+ AUB_TRACE_KERNEL_INSTRUCTIONS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xb),
+ AUB_TRACE_SCRATCH_SPACE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xc),
+ AUB_TRACE_SAMPLER_DEFAULT_COLOR = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xd),
+
+ AUB_TRACE_SCISSOR_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x15),
+ AUB_TRACE_BLEND_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x16),
+ AUB_TRACE_DEPTH_STENCIL_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x17),
+
+ AUB_TRACE_VERTEX_BUFFER = ENCODE_SS_TYPE(AUB_TRACE_TYPE_VERTEX_BUFFER, 0),
+ AUB_TRACE_BINDING_TABLE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x100),
+ AUB_TRACE_SURFACE_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x200),
+ AUB_TRACE_VS_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 0),
+ AUB_TRACE_WM_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 1),
+};
+
+#undef ENCODE_SS_TYPE
+
+/**
+ * Decode an aub_state_struct_type value to determine the type that should be
+ * stored in the .aub file.
+ */
+static inline uint32_t AUB_TRACE_TYPE(enum aub_state_struct_type ss_type)
+{
+ return (ss_type & 0xFFFF0000) >> 16;
+}
+
+/**
+ * Decode a state_struct_type value to determine the subtype that should be
+ * stored in the .aub file.
+ */
+static inline uint32_t AUB_TRACE_SUBTYPE(enum aub_state_struct_type ss_type)
+{
+ return ss_type & 0xFFFF;
+}
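A worked round trip through the encode/decode helpers above (plain arithmetic on the constants defined in this header):

    /* AUB_TRACE_VS_STATE == ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 1)
     *                    == ((14 << 8) << 16) | 1 == 0x0e000001, so
     * AUB_TRACE_TYPE(AUB_TRACE_VS_STATE)    == 0x0e00 (AUB_TRACE_TYPE_GENERAL)
     * AUB_TRACE_SUBTYPE(AUB_TRACE_VS_STATE) == 1
     */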
+
+/* DW3: address */
+/* DW4: len */
+
+#endif /* _INTEL_AUB_H */
diff --git a/chromium/third_party/libdrm/src/intel/intel_bufmgr.c b/chromium/third_party/libdrm/src/intel/intel_bufmgr.c
new file mode 100644
index 00000000000..a285340039f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel_bufmgr.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <assert.h>
+#include <errno.h>
+#include <drm.h>
+#include <i915_drm.h>
+#include <pciaccess.h>
+#include "libdrm_macros.h"
+#include "intel_bufmgr.h"
+#include "intel_bufmgr_priv.h"
+#include "xf86drm.h"
+
+/** @file intel_bufmgr.c
+ *
+ * Convenience functions for buffer management methods.
+ */
+
+drm_intel_bo *
+drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
+{
+ return bufmgr->bo_alloc(bufmgr, name, size, alignment);
+}
+
+drm_intel_bo *
+drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
+{
+ return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
+}
+
+drm_intel_bo *
+drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name, void *addr,
+ uint32_t tiling_mode,
+ uint32_t stride,
+ unsigned long size,
+ unsigned long flags)
+{
+ if (bufmgr->bo_alloc_userptr)
+ return bufmgr->bo_alloc_userptr(bufmgr, name, addr, tiling_mode,
+ stride, size, flags);
+ return NULL;
+}
+
+drm_intel_bo *
+drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
+ int x, int y, int cpp, uint32_t *tiling_mode,
+ unsigned long *pitch, unsigned long flags)
+{
+ return bufmgr->bo_alloc_tiled(bufmgr, name, x, y, cpp,
+ tiling_mode, pitch, flags);
+}
+
+void
+drm_intel_bo_reference(drm_intel_bo *bo)
+{
+ bo->bufmgr->bo_reference(bo);
+}
+
+void
+drm_intel_bo_unreference(drm_intel_bo *bo)
+{
+ if (bo == NULL)
+ return;
+
+ bo->bufmgr->bo_unreference(bo);
+}
+
+int
+drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
+{
+ return buf->bufmgr->bo_map(buf, write_enable);
+}
+
+int
+drm_intel_bo_unmap(drm_intel_bo *buf)
+{
+ return buf->bufmgr->bo_unmap(buf);
+}
+
+int
+drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ return bo->bufmgr->bo_subdata(bo, offset, size, data);
+}
+
+int
+drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
+{
+ int ret;
+ if (bo->bufmgr->bo_get_subdata)
+ return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
+
+ if (size == 0 || data == NULL)
+ return 0;
+
+ ret = drm_intel_bo_map(bo, 0);
+ if (ret)
+ return ret;
+ memcpy(data, (unsigned char *)bo->virtual + offset, size);
+ drm_intel_bo_unmap(bo);
+ return 0;
+}
+
+void
+drm_intel_bo_wait_rendering(drm_intel_bo *bo)
+{
+ bo->bufmgr->bo_wait_rendering(bo);
+}
+
+void
+drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
+{
+ bufmgr->destroy(bufmgr);
+}
+
+int
+drm_intel_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
+{
+ return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
+}
+
+int
+drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
+ unsigned int rings)
+{
+ if (bo->bufmgr->bo_mrb_exec)
+ return bo->bufmgr->bo_mrb_exec(bo, used,
+ cliprects, num_cliprects, DR4,
+ rings);
+
+ switch (rings) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ return bo->bufmgr->bo_exec(bo, used,
+ cliprects, num_cliprects, DR4);
+ default:
+ return -ENODEV;
+ }
+}
+
+void
+drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
+{
+ bufmgr->debug = enable_debug;
+}
+
+int
+drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
+{
+ return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
+}
+
+int
+drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
+{
+ if (bo->bufmgr->bo_flink)
+ return bo->bufmgr->bo_flink(bo, name);
+
+ return -ENODEV;
+}
+
+int
+drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ return bo->bufmgr->bo_emit_reloc(bo, offset,
+ target_bo, target_offset,
+ read_domains, write_domain);
+}
+
+/* For fence registers, not GL fences */
+int
+drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ return bo->bufmgr->bo_emit_reloc_fence(bo, offset,
+ target_bo, target_offset,
+ read_domains, write_domain);
+}
+
+
+int
+drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
+{
+ if (bo->bufmgr->bo_pin)
+ return bo->bufmgr->bo_pin(bo, alignment);
+
+ return -ENODEV;
+}
+
+int
+drm_intel_bo_unpin(drm_intel_bo *bo)
+{
+ if (bo->bufmgr->bo_unpin)
+ return bo->bufmgr->bo_unpin(bo);
+
+ return -ENODEV;
+}
+
+int
+drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride)
+{
+ if (bo->bufmgr->bo_set_tiling)
+ return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
+
+ *tiling_mode = I915_TILING_NONE;
+ return 0;
+}
+
+int
+drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode)
+{
+ if (bo->bufmgr->bo_get_tiling)
+ return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
+
+ *tiling_mode = I915_TILING_NONE;
+ *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ return 0;
+}
+
+int
+drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
+{
+ if (bo->bufmgr->bo_set_softpin_offset)
+ return bo->bufmgr->bo_set_softpin_offset(bo, offset);
+
+ return -ENODEV;
+}
+
+int
+drm_intel_bo_disable_reuse(drm_intel_bo *bo)
+{
+ if (bo->bufmgr->bo_disable_reuse)
+ return bo->bufmgr->bo_disable_reuse(bo);
+ return 0;
+}
+
+int
+drm_intel_bo_is_reusable(drm_intel_bo *bo)
+{
+ if (bo->bufmgr->bo_is_reusable)
+ return bo->bufmgr->bo_is_reusable(bo);
+ return 0;
+}
+
+int
+drm_intel_bo_busy(drm_intel_bo *bo)
+{
+ if (bo->bufmgr->bo_busy)
+ return bo->bufmgr->bo_busy(bo);
+ return 0;
+}
+
+int
+drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
+{
+ if (bo->bufmgr->bo_madvise)
+ return bo->bufmgr->bo_madvise(bo, madv);
+ return -1;
+}
+
+int
+drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
+{
+ if (bo->bufmgr->bo_use_48b_address_range) {
+ bo->bufmgr->bo_use_48b_address_range(bo, enable);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+int
+drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+{
+ return bo->bufmgr->bo_references(bo, target_bo);
+}
+
+int
+drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
+{
+ if (bufmgr->get_pipe_from_crtc_id)
+ return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
+ return -1;
+}
+
+static size_t
+drm_intel_probe_agp_aperture_size(int fd)
+{
+ struct pci_device *pci_dev;
+ size_t size = 0;
+ int ret;
+
+ ret = pci_system_init();
+ if (ret)
+ goto err;
+
+ /* XXX handle multiple adaptors? */
+ pci_dev = pci_device_find_by_slot(0, 0, 2, 0);
+ if (pci_dev == NULL)
+ goto err;
+
+ ret = pci_device_probe(pci_dev);
+ if (ret)
+ goto err;
+
+ size = pci_dev->regions[2].size;
+err:
+ pci_system_cleanup ();
+ return size;
+}
+
+int
+drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total)
+{
+
+ struct drm_i915_gem_get_aperture aperture;
+ int ret;
+
+ ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+ if (ret)
+ return ret;
+
+ *mappable = 0;
+ /* XXX add a query for the kernel value? */
+ if (*mappable == 0)
+ *mappable = drm_intel_probe_agp_aperture_size(fd);
+ if (*mappable == 0)
+ *mappable = 64 * 1024 * 1024; /* minimum possible value */
+ *total = aperture.aper_size;
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/intel/intel_bufmgr.h b/chromium/third_party/libdrm/src/intel/intel_bufmgr.h
new file mode 100644
index 00000000000..a1abbcd2b06
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel_bufmgr.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/**
+ * @file intel_bufmgr.h
+ *
+ * Public definitions of Intel-specific bufmgr functions.
+ */
+
+#ifndef INTEL_BUFMGR_H
+#define INTEL_BUFMGR_H
+
+#include <stdio.h>
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+struct drm_clip_rect;
+
+typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
+typedef struct _drm_intel_context drm_intel_context;
+typedef struct _drm_intel_bo drm_intel_bo;
+
+struct _drm_intel_bo {
+ /**
+ * Size in bytes of the buffer object.
+ *
+ * The size may be larger than the size originally requested for the
+ * allocation, such as being aligned to page size.
+ */
+ unsigned long size;
+
+ /**
+ * Alignment requirement for object
+ *
+ * Used for GTT mapping & pinning the object.
+ */
+ unsigned long align;
+
+ /**
+ * Deprecated field containing (possibly the low 32-bits of) the last
+ * seen virtual card address. Use offset64 instead.
+ */
+ unsigned long offset;
+
+ /**
+ * Virtual address for accessing the buffer data. Only valid while
+ * mapped.
+ */
+#ifdef __cplusplus
+ void *virt;
+#else
+ void *virtual;
+#endif
+
+ /** Buffer manager context associated with this buffer object */
+ drm_intel_bufmgr *bufmgr;
+
+ /**
+ * MM-specific handle for accessing object
+ */
+ int handle;
+
+ /**
+ * Last seen card virtual address (offset from the beginning of the
+ * aperture) for the object. This should be used to fill relocation
+ * entries when calling drm_intel_bo_emit_reloc()
+ */
+ uint64_t offset64;
+};
+
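+/*
+ * Illustrative sketch (comment added for clarity; not part of the upstream
+ * header): a typical buffer lifecycle using the struct above and the
+ * functions declared below, assuming "bufmgr" came from e.g.
+ * drm_intel_bufmgr_gem_init(fd, 4096):
+ *
+ *   drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 8192, 4096);
+ *   if (bo != NULL && drm_intel_bo_map(bo, 1) == 0) {  // 1 = write enable
+ *           memset(bo->virtual, 0, bo->size);  // valid only while mapped
+ *           drm_intel_bo_unmap(bo);
+ *   }
+ *   drm_intel_bo_unreference(bo);
+ */
+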
+enum aub_dump_bmp_format {
+ AUB_DUMP_BMP_FORMAT_8BIT = 1,
+ AUB_DUMP_BMP_FORMAT_ARGB_4444 = 4,
+ AUB_DUMP_BMP_FORMAT_ARGB_0888 = 6,
+ AUB_DUMP_BMP_FORMAT_ARGB_8888 = 7,
+};
+
+typedef struct _drm_intel_aub_annotation {
+ uint32_t type;
+ uint32_t subtype;
+ uint32_t ending_offset;
+} drm_intel_aub_annotation;
+
+#define BO_ALLOC_FOR_RENDER (1<<0)
+
+drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment);
+drm_intel_bo *drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr, uint32_t tiling_mode,
+ uint32_t stride, unsigned long size,
+ unsigned long flags);
+drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags);
+void drm_intel_bo_reference(drm_intel_bo *bo);
+void drm_intel_bo_unreference(drm_intel_bo *bo);
+int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
+int drm_intel_bo_unmap(drm_intel_bo *bo);
+
+int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
+
+void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
+void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
+int drm_intel_bo_exec(drm_intel_bo *bo, int used,
+ struct drm_clip_rect *cliprects, int num_cliprects, int DR4);
+int drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
+ struct drm_clip_rect *cliprects, int num_cliprects, int DR4,
+ unsigned int flags);
+int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);
+
+int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+int drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo,
+ uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
+int drm_intel_bo_unpin(drm_intel_bo *bo);
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride);
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
+int drm_intel_bo_busy(drm_intel_bo *bo);
+int drm_intel_bo_madvise(drm_intel_bo *bo, int madv);
+int drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable);
+int drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset);
+
+int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
+int drm_intel_bo_is_reusable(drm_intel_bo *bo);
+int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo);
+
+/* drm_intel_bufmgr_gem.c */
+drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
+drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle);
+void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
+ int limit);
+int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo);
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
+int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
+
+int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
+void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start);
+void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
+
+void
+drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
+ const char *filename);
+void drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable);
+void drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
+ int x1, int y1, int width, int height,
+ enum aub_dump_bmp_format format,
+ int pitch, int offset);
+void
+drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
+ drm_intel_aub_annotation *annotations,
+ unsigned count);
+
+int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id);
+
+int drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total);
+int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr);
+int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns);
+
+drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr);
+void drm_intel_gem_context_destroy(drm_intel_context *ctx);
+int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
+ int used, unsigned int flags);
+
+int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd);
+drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
+ int prime_fd, int size);
+
+/* drm_intel_bufmgr_fake.c */
+drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
+ unsigned long low_offset,
+ void *low_virtual,
+ unsigned long size,
+ volatile unsigned int
+ *last_dispatch);
+void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
+ volatile unsigned int
+ *last_dispatch);
+void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
+ int (*exec) (drm_intel_bo *bo,
+ unsigned int used,
+ void *priv),
+ void *priv);
+void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
+ unsigned int (*emit) (void *priv),
+ void (*wait) (unsigned int fence,
+ void *priv),
+ void *priv);
+drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long offset,
+ unsigned long size, void *virt);
+void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
+ void (*invalidate_cb) (drm_intel_bo
+ * bo,
+ void *ptr),
+ void *ptr);
+
+void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
+
+struct drm_intel_decode *drm_intel_decode_context_alloc(uint32_t devid);
+void drm_intel_decode_context_free(struct drm_intel_decode *ctx);
+void drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
+ void *data, uint32_t hw_offset,
+ int count);
+void drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
+ int dump_past_end);
+void drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
+ uint32_t head, uint32_t tail);
+void drm_intel_decode_set_output_file(struct drm_intel_decode *ctx, FILE *out);
+void drm_intel_decode(struct drm_intel_decode *ctx);
+
+int drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
+ uint32_t offset,
+ uint64_t *result);
+
+int drm_intel_get_reset_stats(drm_intel_context *ctx,
+ uint32_t *reset_count,
+ uint32_t *active,
+ uint32_t *pending);
+
+int drm_intel_get_subslice_total(int fd, unsigned int *subslice_total);
+int drm_intel_get_eu_total(int fd, unsigned int *eu_total);
+
+/** @{ Compatibility defines to keep old code building despite the symbol rename
+ * from dri_* to drm_intel_*
+ */
+#define dri_bo drm_intel_bo
+#define dri_bufmgr drm_intel_bufmgr
+#define dri_bo_alloc drm_intel_bo_alloc
+#define dri_bo_reference drm_intel_bo_reference
+#define dri_bo_unreference drm_intel_bo_unreference
+#define dri_bo_map drm_intel_bo_map
+#define dri_bo_unmap drm_intel_bo_unmap
+#define dri_bo_subdata drm_intel_bo_subdata
+#define dri_bo_get_subdata drm_intel_bo_get_subdata
+#define dri_bo_wait_rendering drm_intel_bo_wait_rendering
+#define dri_bufmgr_set_debug drm_intel_bufmgr_set_debug
+#define dri_bufmgr_destroy drm_intel_bufmgr_destroy
+#define dri_bo_exec drm_intel_bo_exec
+#define dri_bufmgr_check_aperture_space drm_intel_bufmgr_check_aperture_space
+#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, \
+ reloc_offset, target_bo) \
+ drm_intel_bo_emit_reloc(reloc_bo, reloc_offset, \
+ target_bo, target_offset, \
+ read, write);
+#define dri_bo_pin drm_intel_bo_pin
+#define dri_bo_unpin drm_intel_bo_unpin
+#define dri_bo_get_tiling drm_intel_bo_get_tiling
+#define dri_bo_set_tiling(bo, mode) drm_intel_bo_set_tiling(bo, mode, 0)
+#define dri_bo_flink drm_intel_bo_flink
+#define intel_bufmgr_gem_init drm_intel_bufmgr_gem_init
+#define intel_bo_gem_create_from_name drm_intel_bo_gem_create_from_name
+#define intel_bufmgr_gem_enable_reuse drm_intel_bufmgr_gem_enable_reuse
+#define intel_bufmgr_fake_init drm_intel_bufmgr_fake_init
+#define intel_bufmgr_fake_set_last_dispatch drm_intel_bufmgr_fake_set_last_dispatch
+#define intel_bufmgr_fake_set_exec_callback drm_intel_bufmgr_fake_set_exec_callback
+#define intel_bufmgr_fake_set_fence_callback drm_intel_bufmgr_fake_set_fence_callback
+#define intel_bo_fake_alloc_static drm_intel_bo_fake_alloc_static
+#define intel_bo_fake_disable_backing_store drm_intel_bo_fake_disable_backing_store
+#define intel_bufmgr_fake_contended_lock_take drm_intel_bufmgr_fake_contended_lock_take
+#define intel_bufmgr_fake_evict_all drm_intel_bufmgr_fake_evict_all
+
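+/*
+ * Example (comment added for clarity): with the defines above, legacy code
+ * such as
+ *
+ *   dri_bo *bo = dri_bo_alloc(bufmgr, "legacy", 4096, 4096);
+ *   dri_bo_unreference(bo);
+ *
+ * still compiles, expanding to the drm_intel_* equivalents.
+ */
+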
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* INTEL_BUFMGR_H */
diff --git a/chromium/third_party/libdrm/src/intel/intel_bufmgr_fake.c b/chromium/third_party/libdrm/src/intel/intel_bufmgr_fake.c
new file mode 100644
index 00000000000..24b3732aa14
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel_bufmgr_fake.c
@@ -0,0 +1,1630 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Originally a fake version of the buffer manager, written so that we
+ * could prototype changes in a driver fairly quickly; it has since been
+ * fleshed out into a fully functional interim solution.
+ *
+ * Basically it wraps the old-style memory management in the new
+ * programming interface, but is more expressive and avoids many of
+ * the bugs in the old texture manager.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+#include <strings.h>
+#include <xf86drm.h>
+#include <pthread.h>
+#include "intel_bufmgr.h"
+#include "intel_bufmgr_priv.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "mm.h"
+#include "libdrm_macros.h"
+#include "libdrm_lists.h"
+
+#define DBG(...) do { \
+ if (bufmgr_fake->bufmgr.debug) \
+ drmMsg(__VA_ARGS__); \
+} while (0)
+
+/* Internal flags:
+ */
+#define BM_NO_BACKING_STORE 0x00000001
+#define BM_NO_FENCE_SUBDATA 0x00000002
+#define BM_PINNED 0x00000004
+
+/* Wrapper around mm.c's mem_block, which understands that you must
+ * wait for fences to expire before memory can be freed. This is
+ * specific to our use of memcpy for uploads - an upload that was
+ * processed through the command queue wouldn't need to care about
+ * fences.
+ */
+#define MAX_RELOCS 4096
+
+struct fake_buffer_reloc {
+ /** Buffer object that the relocation points at. */
+ drm_intel_bo *target_buf;
+ /** Offset of the relocation entry within reloc_buf. */
+ uint32_t offset;
+ /**
+ * Cached value of the offset when we last performed this relocation.
+ */
+ uint32_t last_target_offset;
+ /** Value added to target_buf's offset to get the relocation entry. */
+ uint32_t delta;
+ /** Cache domains the target buffer is read into. */
+ uint32_t read_domains;
+ /** Cache domain the target buffer will have dirty cachelines in. */
+ uint32_t write_domain;
+};
+
+struct block {
+ struct block *next, *prev;
+ struct mem_block *mem; /* BM_MEM_AGP */
+
+ /**
+ * Marks that the block is currently in the aperture and has yet to be
+ * fenced.
+ */
+ unsigned on_hardware:1;
+ /**
+ * Marks that the block is currently fenced (being used by rendering)
+ * and can't be freed until @fence is passed.
+ */
+ unsigned fenced:1;
+
+ /** Fence cookie for the block. */
+ unsigned fence; /* Split to read_fence, write_fence */
+
+ drm_intel_bo *bo;
+ void *virtual;
+};
+
+typedef struct _bufmgr_fake {
+ drm_intel_bufmgr bufmgr;
+
+ pthread_mutex_t lock;
+
+ unsigned long low_offset;
+ unsigned long size;
+ void *virtual;
+
+ struct mem_block *heap;
+
+ unsigned buf_nr; /* for generating ids */
+
+ /**
+ * List of blocks which are currently in the GART but haven't been
+ * fenced yet.
+ */
+ struct block on_hardware;
+ /**
+ * List of blocks which are in the GART and have an active fence on
+ * them.
+ */
+ struct block fenced;
+ /**
+ * List of blocks which have an expired fence and are ready to be
+ * evicted.
+ */
+ struct block lru;
+
+ unsigned int last_fence;
+
+ unsigned fail:1;
+ unsigned need_fence:1;
+ int thrashing;
+
+ /**
+ * Driver callback to emit a fence, returning the cookie.
+ *
+ * This allows the driver to hook in a replacement for the DRM usage in
+ * bufmgr_fake.
+ *
+ * Currently, this also requires that a write flush be emitted before
+ * emitting the fence, but this should change.
+ */
+ unsigned int (*fence_emit) (void *private);
+ /** Driver callback to wait for a fence cookie to have passed. */
+ void (*fence_wait) (unsigned int fence, void *private);
+ void *fence_priv;
+
+ /**
+ * Driver callback to execute a buffer.
+ *
+ * This allows the driver to hook in a replacement for the DRM usage in
+ * bufmgr_fake.
+ */
+ int (*exec) (drm_intel_bo *bo, unsigned int used, void *priv);
+ void *exec_priv;
+
+ /** Driver-supplied argument to driver callbacks */
+ void *driver_priv;
+ /**
+ * Pointer to kernel-updated sarea data for the last completed user irq
+ */
+ volatile int *last_dispatch;
+
+ int fd;
+
+ int debug;
+
+ int performed_rendering;
+} drm_intel_bufmgr_fake;
+
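+/*
+ * Block lifecycle implied by the lists above (comment added for clarity):
+ * a validated block first sits on on_hardware, moves to fenced once a
+ * fence has been emitted for its batch, and returns to lru when that
+ * fence passes; blocks are only freed or reused from lru.
+ */
+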
+typedef struct _drm_intel_bo_fake {
+ drm_intel_bo bo;
+
+ unsigned id; /* debug only */
+ const char *name;
+
+ unsigned dirty:1;
+ /**
+	 * has the card written to this buffer - we may need to copy it back
+ */
+ unsigned card_dirty:1;
+ unsigned int refcount;
+ /* Flags may consist of any of the DRM_BO flags, plus
+	 * BM_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
+ * first two driver private flags.
+ */
+ uint64_t flags;
+ /** Cache domains the target buffer is read into. */
+ uint32_t read_domains;
+ /** Cache domain the target buffer will have dirty cachelines in. */
+ uint32_t write_domain;
+
+ unsigned int alignment;
+ int is_static, validated;
+ unsigned int map_count;
+
+ /** relocation list */
+ struct fake_buffer_reloc *relocs;
+ int nr_relocs;
+ /**
+ * Total size of the target_bos of this buffer.
+ *
+ * Used for estimation in check_aperture.
+ */
+ unsigned int child_size;
+
+ struct block *block;
+ void *backing_store;
+ void (*invalidate_cb) (drm_intel_bo *bo, void *ptr);
+ void *invalidate_ptr;
+} drm_intel_bo_fake;
+
+static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
+ unsigned int fence_cookie);
+
+#define MAXFENCE 0x7fffffff
+
+static int
+FENCE_LTE(unsigned a, unsigned b)
+{
+ if (a == b)
+ return 1;
+
+ if (a < b && b - a < (1 << 24))
+ return 1;
+
+ if (a > b && MAXFENCE - a + b < (1 << 24))
+ return 1;
+
+ return 0;
+}
+
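+/*
+ * Worked example (comment added for clarity): with MAXFENCE = 0x7fffffff,
+ * FENCE_LTE(0x7ffffffd, 0x2) is true because the cookie space has wrapped:
+ * MAXFENCE - 0x7ffffffd + 0x2 = 4, well inside the 1<<24 window, whereas a
+ * plain "a <= b" would wrongly treat the wrapped fence as not yet passed.
+ */
+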
+void
+drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
+ unsigned int (*emit) (void *priv),
+ void (*wait) (unsigned int fence,
+ void *priv),
+ void *priv)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ bufmgr_fake->fence_emit = emit;
+ bufmgr_fake->fence_wait = wait;
+ bufmgr_fake->fence_priv = priv;
+}
+
+static unsigned int
+_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
+{
+ struct drm_i915_irq_emit ie;
+ int ret, seq = 1;
+
+ if (bufmgr_fake->fence_emit != NULL) {
+ seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
+ return seq;
+ }
+
+ ie.irq_seq = &seq;
+ ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
+ &ie, sizeof(ie));
+ if (ret) {
+ drmMsg("%s: drm_i915_irq_emit: %d\n", __func__, ret);
+ abort();
+ }
+
+ DBG("emit 0x%08x\n", seq);
+ return seq;
+}
+
+static void
+_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
+{
+ struct drm_i915_irq_wait iw;
+ int hw_seq, busy_count = 0;
+ int ret;
+ int kernel_lied;
+
+ if (bufmgr_fake->fence_wait != NULL) {
+ bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
+ clear_fenced(bufmgr_fake, seq);
+ return;
+ }
+
+ iw.irq_seq = seq;
+
+ DBG("wait 0x%08x\n", iw.irq_seq);
+
+ /* The kernel IRQ_WAIT implementation is all sorts of broken.
+ * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
+ * unsigned range.
+ * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
+ * signed range.
+ * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
+ * signed range.
+ * 4) It returns -EBUSY in 3 seconds even if the hardware is still
+ * successfully chewing through buffers.
+ *
+ * Assume that in userland we treat sequence numbers as ints, which
+ * makes some of the comparisons convenient, since the sequence
+ * numbers are all positive signed integers.
+ *
+ * From this we get several cases we need to handle. Here's a timeline.
+ * 0x2 0x7 0x7ffffff8 0x7ffffffd
+ * | | | |
+ * ------------------------------------------------------------
+ *
+ * A) Normal wait for hw to catch up
+ * hw_seq seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = 5. If we call IRQ_WAIT, it will wait for hw to
+ * catch up.
+ *
+ * B) Normal wait for a sequence number that's already passed.
+ * seq hw_seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = -5. If we call IRQ_WAIT, it returns 0 quickly.
+ *
+ * C) Hardware has already wrapped around ahead of us
+ * hw_seq seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = 0x80000000 - 5. If we called IRQ_WAIT, it would wait
+ * for hw_seq >= seq, which may never occur. Thus, we want to catch
+ * this in userland and return 0.
+ *
+ * D) We've wrapped around ahead of the hardware.
+ * seq hw_seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = -(0x80000000 - 5). If we called IRQ_WAIT, it would
+ * return 0 quickly because hw_seq >= seq, even though the hardware
+ * isn't caught up. Thus, we need to catch this early return in
+ * userland and bother the kernel until the hardware really does
+ * catch up.
+ *
+ * E) Hardware might wrap after we test in userland.
+ * hw_seq seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = 5. If we call IRQ_WAIT, it will likely see seq >=
+ * hw_seq and wait. However, suppose hw_seq wraps before we make it
+ * into the kernel. The kernel sees hw_seq >= seq and waits for 3
+ * seconds then returns -EBUSY. This is case C). We should catch
+ * this and then return successfully.
+ *
+ * F) Hardware might take a long time on a buffer.
+ * hw_seq seq
+ * | |
+ * -------------------------------------------------------------------
+ * seq - hw_seq = 5. If we call IRQ_WAIT, if sequence 2 through 5
+ * take too long, it will return -EBUSY. Batchbuffers in the
+ * gltestperf demo were seen to take up to 7 seconds. We should
+ * catch early -EBUSY return and keep trying.
+ */
+
+ do {
+ /* Keep a copy of last_dispatch so that if the wait -EBUSYs
+ * because the hardware didn't catch up in 3 seconds, we can
+ * see if it at least made progress and retry.
+ */
+ hw_seq = *bufmgr_fake->last_dispatch;
+
+ /* Catch case C */
+ if (seq - hw_seq > 0x40000000)
+ return;
+
+ ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
+ &iw, sizeof(iw));
+ /* Catch case D */
+ kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
+ -0x40000000);
+
+ /* Catch case E */
+ if (ret == -EBUSY
+ && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
+ ret = 0;
+
+ /* Catch case F: Allow up to 15 seconds chewing on one buffer. */
+ if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
+ busy_count = 0;
+ else
+ busy_count++;
+ } while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
+ (ret == -EBUSY && busy_count < 5));
+
+ if (ret != 0) {
+ drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
+ __LINE__, strerror(-ret));
+ abort();
+ }
+ clear_fenced(bufmgr_fake, seq);
+}
+
+static int
+_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+	/* Slight problem with wrap-around, handled by FENCE_LTE():
+ */
+ return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
+}
+
+/**
+ * Allocate a memory manager block for the buffer.
+ */
+static int
+alloc_block(drm_intel_bo *bo)
+{
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+	struct block *block = (struct block *)calloc(1, sizeof(*block));
+ unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
+ unsigned int sz;
+
+ if (!block)
+		return 0;
+
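+	/* Round the request up to the power-of-two alignment; e.g. a
+	 * 0x1010-byte bo with 0x1000 alignment takes 0x2000 heap bytes
+	 * (comment added for clarity). */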
+ sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
+
+ block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
+ if (!block->mem) {
+ free(block);
+ return 0;
+ }
+
+ DRMINITLISTHEAD(block);
+
+ /* Insert at head or at tail??? */
+ DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+
+ block->virtual = (uint8_t *) bufmgr_fake->virtual +
+ block->mem->ofs - bufmgr_fake->low_offset;
+ block->bo = bo;
+
+ bo_fake->block = block;
+
+ return 1;
+}
+
+/* Release the card storage associated with buf:
+ */
+static void
+free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
+ int skip_dirty_copy)
+{
+ drm_intel_bo_fake *bo_fake;
+ DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
+ block->on_hardware, block->fenced);
+
+ if (!block)
+ return;
+
+ bo_fake = (drm_intel_bo_fake *) block->bo;
+
+ if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
+ skip_dirty_copy = 1;
+
+ if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
+ memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
+ bo_fake->card_dirty = 0;
+ bo_fake->dirty = 1;
+ }
+
+ if (block->on_hardware) {
+ block->bo = NULL;
+ } else if (block->fenced) {
+ block->bo = NULL;
+ } else {
+ DBG(" - free immediately\n");
+ DRMLISTDEL(block);
+
+ mmFreeMem(block->mem);
+ free(block);
+ }
+}
+
+static void
+alloc_backing_store(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ assert(!bo_fake->backing_store);
+ assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
+
+ bo_fake->backing_store = malloc(bo->size);
+
+ DBG("alloc_backing - buf %d %p %lu\n", bo_fake->id,
+ bo_fake->backing_store, bo->size);
+ assert(bo_fake->backing_store);
+}
+
+static void
+free_backing_store(drm_intel_bo *bo)
+{
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ if (bo_fake->backing_store) {
+ assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
+ free(bo_fake->backing_store);
+ bo_fake->backing_store = NULL;
+ }
+}
+
+static void
+set_dirty(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ if (bo_fake->flags & BM_NO_BACKING_STORE
+ && bo_fake->invalidate_cb != NULL)
+ bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
+
+ assert(!(bo_fake->flags & BM_PINNED));
+
+ DBG("set_dirty - buf %d\n", bo_fake->id);
+ bo_fake->dirty = 1;
+}
+
+static int
+evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
+{
+ struct block *block, *tmp;
+
+ DBG("%s\n", __func__);
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+
+ if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ if (block->fence && max_fence && !FENCE_LTE(block->fence,
+ max_fence))
+ return 0;
+
+ set_dirty(&bo_fake->bo);
+ bo_fake->block = NULL;
+
+ free_block(bufmgr_fake, block, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
+{
+ struct block *block, *tmp;
+
+ DBG("%s\n", __func__);
+
+ DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+
+ if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ set_dirty(&bo_fake->bo);
+ bo_fake->block = NULL;
+
+ free_block(bufmgr_fake, block, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Removes all objects from the fenced list older than the given fence.
+ */
+static int
+clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int fence_cookie)
+{
+ struct block *block, *tmp;
+ int ret = 0;
+
+ bufmgr_fake->last_fence = fence_cookie;
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
+ assert(block->fenced);
+
+ if (_fence_test(bufmgr_fake, block->fence)) {
+
+ block->fenced = 0;
+
+ if (!block->bo) {
+ DBG("delayed free: offset %x sz %x\n",
+ block->mem->ofs, block->mem->size);
+ DRMLISTDEL(block);
+ mmFreeMem(block->mem);
+ free(block);
+ } else {
+ DBG("return to lru: offset %x sz %x\n",
+ block->mem->ofs, block->mem->size);
+ DRMLISTDEL(block);
+ DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+ }
+
+ ret = 1;
+ } else {
+ /* Blocks are ordered by fence, so if one fails, all
+ * from here will fail also:
+ */
+			DBG("fence not passed: offset %x sz %x %d %d\n",
+ block->mem->ofs, block->mem->size, block->fence,
+ bufmgr_fake->last_fence);
+ break;
+ }
+ }
+
+ DBG("%s: %d\n", __func__, ret);
+ return ret;
+}
+
+static void
+fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+ struct block *block, *tmp;
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+ DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
+ block, block->mem->size, block->mem->ofs, block->bo, fence);
+ block->fence = fence;
+
+ block->on_hardware = 0;
+ block->fenced = 1;
+
+ /* Move to tail of pending list here
+ */
+ DRMLISTDEL(block);
+ DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
+ }
+
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+}
+
+static int
+evict_and_alloc_block(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ assert(bo_fake->block == NULL);
+
+ /* Search for already free memory:
+ */
+ if (alloc_block(bo))
+ return 1;
+
+ /* If we're not thrashing, allow lru eviction to dig deeper into
+ * recently used textures. We'll probably be thrashing soon:
+ */
+ if (!bufmgr_fake->thrashing) {
+ while (evict_lru(bufmgr_fake, 0))
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ /* Keep thrashing counter alive?
+ */
+ if (bufmgr_fake->thrashing)
+ bufmgr_fake->thrashing = 20;
+
+ /* Wait on any already pending fences - here we are waiting for any
+ * freed memory that has been submitted to hardware and fenced to
+ * become available:
+ */
+ while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+ uint32_t fence = bufmgr_fake->fenced.next->fence;
+ _fence_wait_internal(bufmgr_fake, fence);
+
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
+ while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+ uint32_t fence = bufmgr_fake->fenced.next->fence;
+ _fence_wait_internal(bufmgr_fake, fence);
+ }
+
+ if (!bufmgr_fake->thrashing) {
+ DBG("thrashing\n");
+ }
+ bufmgr_fake->thrashing = 20;
+
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ while (evict_mru(bufmgr_fake))
+ if (alloc_block(bo))
+ return 1;
+
+ DBG("%s 0x%lx bytes failed\n", __func__, bo->size);
+
+ return 0;
+}
+
+/***********************************************************************
+ * Public functions
+ */
+
+/**
+ * Wait for hardware idle by emitting a fence and waiting for it.
+ */
+static void
+drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
+{
+ unsigned int cookie;
+
+ cookie = _fence_emit_internal(bufmgr_fake);
+ _fence_wait_internal(bufmgr_fake, cookie);
+}
+
+/**
+ * Wait for rendering to a buffer to complete.
+ *
+ * It is assumed that the batchbuffer which performed the rendering included
+ * the necessary flushing.
+ */
+static void
+drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ if (bo_fake->block == NULL || !bo_fake->block->fenced)
+ return;
+
+ _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+}
+
+static void
+drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ drm_intel_fake_bo_wait_rendering_locked(bo);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+/* Specifically ignore texture memory sharing.
+ * -- just evict everything
+ * -- and wait for idle
+ */
+void
+drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+ struct block *block, *tmp;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ bufmgr_fake->need_fence = 1;
+ bufmgr_fake->fail = 0;
+
+ /* Wait for hardware idle. We don't know where acceleration has been
+ * happening, so we'll need to wait anyway before letting anything get
+ * put on the card again.
+ */
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+
+	/* Check that we didn't release the lock without having fenced the
+	 * last set of buffers.
+ */
+ assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ assert(_fence_test(bufmgr_fake, block->fence));
+ set_dirty(block->bo);
+ }
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+static drm_intel_bo *
+drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake;
+ drm_intel_bo_fake *bo_fake;
+
+ bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ assert(size != 0);
+
+ bo_fake = calloc(1, sizeof(*bo_fake));
+ if (!bo_fake)
+ return NULL;
+
+ bo_fake->bo.size = size;
+ bo_fake->bo.offset = -1;
+ bo_fake->bo.virtual = NULL;
+ bo_fake->bo.bufmgr = bufmgr;
+ bo_fake->refcount = 1;
+
+ /* Alignment must be a power of two */
+ assert((alignment & (alignment - 1)) == 0);
+ if (alignment == 0)
+ alignment = 1;
+ bo_fake->alignment = alignment;
+ bo_fake->id = ++bufmgr_fake->buf_nr;
+ bo_fake->name = name;
+ bo_fake->flags = 0;
+ bo_fake->is_static = 0;
+
+ DBG("drm_bo_alloc: (buf %d: %s, %lu kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ return &bo_fake->bo;
+}
+
+static drm_intel_bo *
+drm_intel_fake_bo_alloc_tiled(drm_intel_bufmgr * bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags)
+{
+ unsigned long stride, aligned_y;
+
+ /* No runtime tiling support for fake. */
+ *tiling_mode = I915_TILING_NONE;
+
+ /* Align it for being a render target. Shouldn't need anything else. */
+ stride = x * cpp;
+ stride = ROUND_UP_TO(stride, 64);
+
+ /* 965 subspan loading alignment */
+ aligned_y = ALIGN(y, 2);
+
+ *pitch = stride;
+
+ return drm_intel_fake_bo_alloc(bufmgr, name, stride * aligned_y,
+ 4096);
+}
+
+drm_intel_bo *
+drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long offset,
+ unsigned long size, void *virtual)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake;
+ drm_intel_bo_fake *bo_fake;
+
+ bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ assert(size != 0);
+
+ bo_fake = calloc(1, sizeof(*bo_fake));
+ if (!bo_fake)
+ return NULL;
+
+ bo_fake->bo.size = size;
+ bo_fake->bo.offset = offset;
+ bo_fake->bo.virtual = virtual;
+ bo_fake->bo.bufmgr = bufmgr;
+ bo_fake->refcount = 1;
+ bo_fake->id = ++bufmgr_fake->buf_nr;
+ bo_fake->name = name;
+ bo_fake->flags = BM_PINNED;
+ bo_fake->is_static = 1;
+
+ DBG("drm_bo_alloc_static: (buf %d: %s, %lu kb)\n", bo_fake->id,
+ bo_fake->name, bo_fake->bo.size / 1024);
+
+ return &bo_fake->bo;
+}
+
+static void
+drm_intel_fake_bo_reference(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ bo_fake->refcount++;
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+static void
+drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
+{
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ bo_fake->refcount++;
+}
+
+static void
+drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ int i;
+
+ if (--bo_fake->refcount == 0) {
+ assert(bo_fake->map_count == 0);
+ /* No remaining references, so free it */
+ if (bo_fake->block)
+ free_block(bufmgr_fake, bo_fake->block, 1);
+ free_backing_store(bo);
+
+ for (i = 0; i < bo_fake->nr_relocs; i++)
+ drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].
+ target_buf);
+
+ DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
+ bo_fake->name);
+
+ free(bo_fake->relocs);
+ free(bo);
+ }
+}
+
+static void
+drm_intel_fake_bo_unreference(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ drm_intel_fake_bo_unreference_locked(bo);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+/**
+ * Set the buffer as not requiring backing store, and instead get the callback
+ * invoked whenever it would be set dirty.
+ */
+void
+drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
+ void (*invalidate_cb) (drm_intel_bo *bo,
+ void *ptr),
+ void *ptr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ if (bo_fake->backing_store)
+ free_backing_store(bo);
+
+ bo_fake->flags |= BM_NO_BACKING_STORE;
+
+ DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
+ bo_fake->dirty = 1;
+ bo_fake->invalidate_cb = invalidate_cb;
+ bo_fake->invalidate_ptr = ptr;
+
+ /* Note that it is invalid right from the start. Also note
+ * invalidate_cb is called with the bufmgr locked, so cannot
+ * itself make bufmgr calls.
+ */
+ if (invalidate_cb != NULL)
+ invalidate_cb(bo, ptr);
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+/**
+ * Map a buffer into bo->virtual, allocating either card memory space (If
+ * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
+ */
+static int
+drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ /* Static buffers are always mapped. */
+ if (bo_fake->is_static) {
+ if (bo_fake->card_dirty) {
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+ bo_fake->card_dirty = 0;
+ }
+ return 0;
+ }
+
+ /* Allow recursive mapping. Mesa may recursively map buffers with
+ * nested display loops, and it is used internally in bufmgr_fake
+ * for relocation.
+ */
+ if (bo_fake->map_count++ != 0)
+ return 0;
+
+ {
+ DBG("drm_bo_map: (buf %d: %s, %lu kb)\n", bo_fake->id,
+ bo_fake->name, bo_fake->bo.size / 1024);
+
+ if (bo->virtual != NULL) {
+ drmMsg("%s: already mapped\n", __func__);
+ abort();
+ } else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {
+
+ if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+ DBG("%s: alloc failed\n", __func__);
+ bufmgr_fake->fail = 1;
+ return 1;
+ } else {
+ assert(bo_fake->block);
+ bo_fake->dirty = 0;
+
+ if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
+ bo_fake->block->fenced) {
+ drm_intel_fake_bo_wait_rendering_locked
+ (bo);
+ }
+
+ bo->virtual = bo_fake->block->virtual;
+ }
+ } else {
+ if (write_enable)
+ set_dirty(bo);
+
+ if (bo_fake->backing_store == 0)
+ alloc_backing_store(bo);
+
+ if ((bo_fake->card_dirty == 1) && bo_fake->block) {
+ if (bo_fake->block->fenced)
+ drm_intel_fake_bo_wait_rendering_locked
+ (bo);
+
+ memcpy(bo_fake->backing_store,
+ bo_fake->block->virtual,
+ bo_fake->block->bo->size);
+ bo_fake->card_dirty = 0;
+ }
+
+ bo->virtual = bo_fake->backing_store;
+ }
+ }
+
+ return 0;
+}
+
+static int
+drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ ret = drm_intel_fake_bo_map_locked(bo, write_enable);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return ret;
+}
+
+static int
+drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ /* Static buffers are always mapped. */
+ if (bo_fake->is_static)
+ return 0;
+
+ assert(bo_fake->map_count != 0);
+ if (--bo_fake->map_count != 0)
+ return 0;
+
+ DBG("drm_bo_unmap: (buf %d: %s, %lu kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ bo->virtual = NULL;
+
+ return 0;
+}
+
+static int drm_intel_fake_bo_unmap(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ ret = drm_intel_fake_bo_unmap_locked(bo);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return ret;
+}
+
+static int
+drm_intel_fake_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ int ret;
+
+ if (size == 0 || data == NULL)
+ return 0;
+
+ ret = drm_intel_bo_map(bo, 1);
+ if (ret)
+ return ret;
+ memcpy((unsigned char *)bo->virtual + offset, data, size);
+ drm_intel_bo_unmap(bo);
+ return 0;
+}
+
+static void
+drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
+{
+ struct block *block, *tmp;
+
+ bufmgr_fake->performed_rendering = 0;
+	/* OK, for every BO that is on the HW, kick it off.
+	   seriously not afraid of the POLICE right now */
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+
+ block->on_hardware = 0;
+ free_block(bufmgr_fake, block, 0);
+ bo_fake->block = NULL;
+ bo_fake->validated = 0;
+ if (!(bo_fake->flags & BM_NO_BACKING_STORE))
+ bo_fake->dirty = 1;
+ }
+
+}
+
+static int
+drm_intel_fake_bo_validate(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ bufmgr_fake = (drm_intel_bufmgr_fake *) bo->bufmgr;
+
+ DBG("drm_bo_validate: (buf %d: %s, %lu kb)\n", bo_fake->id,
+ bo_fake->name, bo_fake->bo.size / 1024);
+
+ /* Sanity check: Buffers should be unmapped before being validated.
+ * This is not so much of a problem for bufmgr_fake, but TTM refuses,
+ * and the problem is harder to debug there.
+ */
+ assert(bo_fake->map_count == 0);
+
+ if (bo_fake->is_static) {
+ /* Add it to the needs-fence list */
+ bufmgr_fake->need_fence = 1;
+ return 0;
+ }
+
+ /* Allocate the card memory */
+ if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+ bufmgr_fake->fail = 1;
+ DBG("Failed to validate buf %d:%s\n", bo_fake->id,
+ bo_fake->name);
+ return -1;
+ }
+
+ assert(bo_fake->block);
+ assert(bo_fake->block->bo == &bo_fake->bo);
+
+ bo->offset = bo_fake->block->mem->ofs;
+
+ /* Upload the buffer contents if necessary */
+ if (bo_fake->dirty) {
+ DBG("Upload dirty buf %d:%s, sz %lu offset 0x%x\n", bo_fake->id,
+ bo_fake->name, bo->size, bo_fake->block->mem->ofs);
+
+ assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));
+
+		/* Actually, we should be able to just wait for a fence on
+		 * the memory, which we would be tracking when we free it.
+		 * Waiting for idle is a sufficiently large hammer for now.
+ */
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+
+		/* We may never have mapped this BO, so it might not have any
+		 * backing store; if that happens it should be rare, but zero
+		 * the card memory in any case. */
+ if (bo_fake->backing_store)
+ memcpy(bo_fake->block->virtual, bo_fake->backing_store,
+ bo->size);
+ else
+ memset(bo_fake->block->virtual, 0, bo->size);
+
+ bo_fake->dirty = 0;
+ }
+
+ bo_fake->block->fenced = 0;
+ bo_fake->block->on_hardware = 1;
+ DRMLISTDEL(bo_fake->block);
+ DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
+
+ bo_fake->validated = 1;
+ bufmgr_fake->need_fence = 1;
+
+ return 0;
+}
+
+static void
+drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+ unsigned int cookie;
+
+ cookie = _fence_emit_internal(bufmgr_fake);
+ fence_blocks(bufmgr_fake, cookie);
+
+ DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
+}
+
+static void
+drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ pthread_mutex_destroy(&bufmgr_fake->lock);
+ mmDestroy(bufmgr_fake->heap);
+ free(bufmgr);
+}
+
+static int
+drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ struct fake_buffer_reloc *r;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *) target_bo;
+ int i;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ assert(bo);
+ assert(target_bo);
+
+	if (bo_fake->relocs == NULL) {
+		bo_fake->relocs =
+		    malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
+		if (bo_fake->relocs == NULL) {
+			pthread_mutex_unlock(&bufmgr_fake->lock);
+			return -ENOMEM;
+		}
+	}
+
+ r = &bo_fake->relocs[bo_fake->nr_relocs++];
+
+ assert(bo_fake->nr_relocs <= MAX_RELOCS);
+
+ drm_intel_fake_bo_reference_locked(target_bo);
+
+ if (!target_fake->is_static) {
+ bo_fake->child_size +=
+ ALIGN(target_bo->size, target_fake->alignment);
+ bo_fake->child_size += target_fake->child_size;
+ }
+ r->target_buf = target_bo;
+ r->offset = offset;
+ r->last_target_offset = target_bo->offset;
+ r->delta = target_offset;
+ r->read_domains = read_domains;
+ r->write_domain = write_domain;
+
+ if (bufmgr_fake->debug) {
+ /* Check that a conflicting relocation hasn't already been
+ * emitted.
+ */
+ for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
+ struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];
+
+ assert(r->offset != r2->offset);
+ }
+ }
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return 0;
+}
+
+/**
+ * Incorporates the validation flags associated with each relocation into
+ * the combined validation flags for the buffer on this batchbuffer submission.
+ */
+static void
+drm_intel_fake_calculate_domains(drm_intel_bo *bo)
+{
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ int i;
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ drm_intel_bo_fake *target_fake =
+ (drm_intel_bo_fake *) r->target_buf;
+
+ /* Do the same for the tree of buffers we depend on */
+ drm_intel_fake_calculate_domains(r->target_buf);
+
+ target_fake->read_domains |= r->read_domains;
+ target_fake->write_domain |= r->write_domain;
+ }
+}
+
+static int
+drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ int i, ret;
+
+ assert(bo_fake->map_count == 0);
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ drm_intel_bo_fake *target_fake =
+ (drm_intel_bo_fake *) r->target_buf;
+ uint32_t reloc_data;
+
+ /* Validate the target buffer if that hasn't been done. */
+ if (!target_fake->validated) {
+ ret =
+ drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
+ if (ret != 0) {
+ if (bo->virtual != NULL)
+ drm_intel_fake_bo_unmap_locked(bo);
+ return ret;
+ }
+ }
+
+ /* Calculate the value of the relocation entry. */
+ if (r->target_buf->offset != r->last_target_offset) {
+ reloc_data = r->target_buf->offset + r->delta;
+
+ if (bo->virtual == NULL)
+ drm_intel_fake_bo_map_locked(bo, 1);
+
+ *(uint32_t *) ((uint8_t *) bo->virtual + r->offset) =
+ reloc_data;
+
+ r->last_target_offset = r->target_buf->offset;
+ }
+ }
+
+ if (bo->virtual != NULL)
+ drm_intel_fake_bo_unmap_locked(bo);
+
+ if (bo_fake->write_domain != 0) {
+ if (!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))) {
+ if (bo_fake->backing_store == 0)
+ alloc_backing_store(bo);
+ }
+ bo_fake->card_dirty = 1;
+ bufmgr_fake->performed_rendering = 1;
+ }
+
+ return drm_intel_fake_bo_validate(bo);
+}
+
+static void
+drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ int i;
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ drm_intel_bo_fake *target_fake =
+ (drm_intel_bo_fake *) r->target_buf;
+
+ if (target_fake->validated)
+ drm_intel_bo_fake_post_submit(r->target_buf);
+
+ DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
+ bo_fake->name, (uint32_t) bo->offset, r->offset,
+ target_fake->name, (uint32_t) r->target_buf->offset,
+ r->delta);
+ }
+
+ assert(bo_fake->map_count == 0);
+ bo_fake->validated = 0;
+ bo_fake->read_domains = 0;
+ bo_fake->write_domain = 0;
+}
+
+void
+drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
+ int (*exec) (drm_intel_bo *bo,
+ unsigned int used,
+ void *priv),
+ void *priv)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ bufmgr_fake->exec = exec;
+ bufmgr_fake->exec_priv = priv;
+}
+
+static int
+drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *) bo;
+ struct drm_i915_batchbuffer batch;
+ int ret;
+ int retry_count = 0;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ bufmgr_fake->performed_rendering = 0;
+
+ drm_intel_fake_calculate_domains(bo);
+
+ batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
+
+	/* We've run out of RAM, so blow the whole lot away and retry. */
+restart:
+ ret = drm_intel_fake_reloc_and_validate_buffer(bo);
+ if (bufmgr_fake->fail == 1) {
+ if (retry_count == 0) {
+ retry_count++;
+ drm_intel_fake_kick_all_locked(bufmgr_fake);
+ bufmgr_fake->fail = 0;
+ goto restart;
+ } else /* dump out the memory here */
+ mmDumpMemInfo(bufmgr_fake->heap);
+ }
+
+ assert(ret == 0);
+
+ if (bufmgr_fake->exec != NULL) {
+ ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
+ if (ret != 0) {
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+ return ret;
+ }
+ } else {
+ batch.start = bo->offset;
+ batch.used = used;
+ batch.cliprects = cliprects;
+ batch.num_cliprects = num_cliprects;
+ batch.DR1 = 0;
+ batch.DR4 = DR4;
+
+ if (drmCommandWrite
+ (bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
+ sizeof(batch))) {
+ drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+ return -errno;
+ }
+ }
+
+ drm_intel_fake_fence_validated(bo->bufmgr);
+
+ drm_intel_bo_fake_post_submit(bo);
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return 0;
+}
+
+/**
+ * Return an error if the list of BOs will exceed the aperture size.
+ *
+ * This is a rough guess and likely to fail, as during the validate sequence we
+ * may place a buffer in an inopportune spot early on and then fail to fit
+ * a set smaller than the aperture.
+ */
+static int
+drm_intel_fake_check_aperture_space(drm_intel_bo ** bo_array, int count)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo_array[0]->bufmgr;
+ unsigned int sz = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo_array[i];
+
+ if (bo_fake == NULL)
+ continue;
+
+ if (!bo_fake->is_static)
+ sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
+ sz += bo_fake->child_size;
+ }
+
+ if (sz > bufmgr_fake->size) {
+ DBG("check_space: overflowed bufmgr size, %ukb vs %lukb\n",
+ sz / 1024, bufmgr_fake->size / 1024);
+ return -1;
+ }
+
+	DBG("drm_check_space: sz %ukb vs bufmgr %lukb\n", sz / 1024,
+ bufmgr_fake->size / 1024);
+ return 0;
+}
+
+/**
+ * Evicts all buffers, waiting for fences to pass and copying contents out
+ * as necessary.
+ *
+ * Used by the X Server on LeaveVT, when the card memory is no longer our
+ * own.
+ */
+void
+drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+ struct block *block, *tmp;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ bufmgr_fake->need_fence = 1;
+ bufmgr_fake->fail = 0;
+
+ /* Wait for hardware idle. We don't know where acceleration has been
+ * happening, so we'll need to wait anyway before letting anything get
+ * put on the card again.
+ */
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+
+	/* Check that we didn't release the lock without having fenced the
+	 * last set of buffers.
+ */
+ assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+ /* Releases the memory, and memcpys dirty contents out if
+ * necessary.
+ */
+ free_block(bufmgr_fake, block, 0);
+ bo_fake->block = NULL;
+ }
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+void
+drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
+ volatile unsigned int
+ *last_dispatch)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
+}
+
+drm_intel_bufmgr *
+drm_intel_bufmgr_fake_init(int fd, unsigned long low_offset,
+ void *low_virtual, unsigned long size,
+ volatile unsigned int *last_dispatch)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake;
+
+	bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
+	if (bufmgr_fake == NULL)
+		return NULL;
+
+ if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
+ free(bufmgr_fake);
+ return NULL;
+ }
+
+ /* Initialize allocator */
+ DRMINITLISTHEAD(&bufmgr_fake->fenced);
+ DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
+ DRMINITLISTHEAD(&bufmgr_fake->lru);
+
+ bufmgr_fake->low_offset = low_offset;
+ bufmgr_fake->virtual = low_virtual;
+ bufmgr_fake->size = size;
+ bufmgr_fake->heap = mmInit(low_offset, size);
+
+ /* Hook in methods */
+ bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
+ bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
+ bufmgr_fake->bufmgr.bo_alloc_tiled = drm_intel_fake_bo_alloc_tiled;
+ bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
+ bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
+ bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
+ bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
+ bufmgr_fake->bufmgr.bo_subdata = drm_intel_fake_bo_subdata;
+ bufmgr_fake->bufmgr.bo_wait_rendering =
+ drm_intel_fake_bo_wait_rendering;
+ bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
+ bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
+ bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
+ bufmgr_fake->bufmgr.check_aperture_space =
+ drm_intel_fake_check_aperture_space;
+ bufmgr_fake->bufmgr.debug = 0;
+
+ bufmgr_fake->fd = fd;
+ bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
+
+ return &bufmgr_fake->bufmgr;
+}
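+
+/*
+ * Illustrative sketch (comment added for clarity; not part of the upstream
+ * source): a driver would typically create the fake bufmgr over a static
+ * aperture range and then install its own hooks, e.g.:
+ *
+ *   drm_intel_bufmgr *bufmgr =
+ *           drm_intel_bufmgr_fake_init(fd, aper_offset, aper_virtual,
+ *                                      aper_size, last_dispatch);
+ *   drm_intel_bufmgr_fake_set_fence_callback(bufmgr, my_emit, my_wait, p);
+ *   drm_intel_bufmgr_fake_set_exec_callback(bufmgr, my_exec, p);
+ *
+ * where my_emit/my_wait/my_exec and the aperture parameters are
+ * hypothetical driver-supplied values.
+ */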
diff --git a/chromium/third_party/libdrm/src/intel/intel_bufmgr_gem.c b/chromium/third_party/libdrm/src/intel/intel_bufmgr_gem.c
new file mode 100644
index 00000000000..0a4012beec5
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel_bufmgr_gem.c
@@ -0,0 +1,3522 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Red Hat Inc.
+ * Copyright © 2007-2012 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <xf86drm.h>
+#include <xf86atomic.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <stdbool.h>
+
+#include "errno.h"
+#ifndef ETIME
+#define ETIME ETIMEDOUT
+#endif
+#include "libdrm_macros.h"
+#include "libdrm_lists.h"
+#include "intel_bufmgr.h"
+#include "intel_bufmgr_priv.h"
+#include "intel_chipset.h"
+#include "string.h"
+
+#include "i915_drm.h"
+
+#ifdef HAVE_VALGRIND
+#include <valgrind.h>
+#include <memcheck.h>
+#define VG(x) x
+#else
+#define VG(x)
+#endif
+
+#define memclear(s) memset(&s, 0, sizeof(s))
+
+#define DBG(...) do { \
+ if (bufmgr_gem->bufmgr.debug) \
+ fprintf(stderr, __VA_ARGS__); \
+} while (0)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define MAX2(A, B) ((A) > (B) ? (A) : (B))
+
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ *
+ * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
+ * the "right shift count >= width of type" warning when that quantity is
+ * 32-bits.
+ */
+#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
+
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((__u32)(n))
+
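+/*
+ * Example (comment added for clarity):
+ * upper_32_bits(0x123456789abcdef0ull) == 0x12345678 and
+ * lower_32_bits(0x123456789abcdef0ull) == 0x9abcdef0; splitting the shift
+ * into two 16-bit steps keeps it well-defined even when n is only 32 bits
+ * wide.
+ */
+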
+typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
+
+struct drm_intel_gem_bo_bucket {
+ drmMMListHead head;
+ unsigned long size;
+};
+
+typedef struct _drm_intel_bufmgr_gem {
+ drm_intel_bufmgr bufmgr;
+
+ atomic_t refcount;
+
+ int fd;
+
+ int max_relocs;
+
+ pthread_mutex_t lock;
+
+ struct drm_i915_gem_exec_object *exec_objects;
+ struct drm_i915_gem_exec_object2 *exec2_objects;
+ drm_intel_bo **exec_bos;
+ int exec_size;
+ int exec_count;
+
+ /** Array of lists of cached gem objects of power-of-two sizes */
+ struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
+ int num_buckets;
+ time_t time;
+
+ drmMMListHead managers;
+
+ drmMMListHead named;
+ drmMMListHead vma_cache;
+ int vma_count, vma_open, vma_max;
+
+ uint64_t gtt_size;
+ int available_fences;
+ int pci_device;
+ int gen;
+ unsigned int has_bsd : 1;
+ unsigned int has_blt : 1;
+ unsigned int has_relaxed_fencing : 1;
+ unsigned int has_llc : 1;
+ unsigned int has_wait_timeout : 1;
+ unsigned int bo_reuse : 1;
+ unsigned int no_exec : 1;
+ unsigned int has_vebox : 1;
+ bool fenced_relocs;
+
+ struct {
+ void *ptr;
+ uint32_t handle;
+ } userptr_active;
+
+} drm_intel_bufmgr_gem;
+
+#define DRM_INTEL_RELOC_FENCE (1<<0)
+
+typedef struct _drm_intel_reloc_target_info {
+ drm_intel_bo *bo;
+ int flags;
+} drm_intel_reloc_target;
+
+struct _drm_intel_bo_gem {
+ drm_intel_bo bo;
+
+ atomic_t refcount;
+ uint32_t gem_handle;
+ const char *name;
+
+ /**
+	 * Kernel-assigned global name for this object
+ *
+ * List contains both flink named and prime fd'd objects
+ */
+ unsigned int global_name;
+ drmMMListHead name_list;
+
+ /**
+ * Index of the buffer within the validation list while preparing a
+ * batchbuffer execution.
+ */
+ int validate_index;
+
+ /**
+ * Current tiling mode
+ */
+ uint32_t tiling_mode;
+ uint32_t swizzle_mode;
+ unsigned long stride;
+
+ time_t free_time;
+
+ /** Array passed to the DRM containing relocation information. */
+ struct drm_i915_gem_relocation_entry *relocs;
+ /**
+ * Array of info structs corresponding to relocs[i].target_handle etc
+ */
+ drm_intel_reloc_target *reloc_target_info;
+ /** Number of entries in relocs */
+ int reloc_count;
+ /** Array of BOs that are referenced by this buffer and will be softpinned */
+ drm_intel_bo **softpin_target;
+	/** Number of softpinned BOs that are referenced by this buffer */
+ int softpin_target_count;
+	/** Maximum number of softpinned BOs that are referenced by this buffer */
+ int softpin_target_size;
+
+ /** Mapped address for the buffer, saved across map/unmap cycles */
+ void *mem_virtual;
+ /** GTT virtual address for the buffer, saved across map/unmap cycles */
+ void *gtt_virtual;
+ /**
+ * Virtual address of the buffer allocated by user, used for userptr
+ * objects only.
+ */
+ void *user_virtual;
+ int map_count;
+ drmMMListHead vma_list;
+
+ /** BO cache list */
+ drmMMListHead head;
+
+ /**
+ * Boolean of whether this BO and its children have been included in
+ * the current drm_intel_bufmgr_check_aperture_space() total.
+ */
+ bool included_in_check_aperture;
+
+ /**
+ * Boolean of whether this buffer has been used as a relocation
+ * target and had its size accounted for, and thus can't have any
+ * further relocations added to it.
+ */
+ bool used_as_reloc_target;
+
+ /**
+ * Boolean of whether we have encountered an error whilst building the relocation tree.
+ */
+ bool has_error;
+
+ /**
+ * Boolean of whether this buffer can be re-used
+ */
+ bool reusable;
+
+ /**
+ * Boolean of whether the GPU is definitely not accessing the buffer.
+ *
+ * This is only valid when reusable, since non-reusable
+	 * buffers are those that have been shared with other
+ * processes, so we don't know their state.
+ */
+ bool idle;
+
+ /**
+ * Boolean of whether this buffer was allocated with userptr
+ */
+ bool is_userptr;
+
+ /**
+ * Boolean of whether this buffer can be placed in the full 48-bit
+ * address range on gen8+.
+ *
+	 * By default, buffers will be kept in a 32-bit range, unless this
+ * flag is explicitly set.
+ */
+ bool use_48b_address_range;
+
+ /**
+ * Whether this buffer is softpinned at offset specified by the user
+ */
+ bool is_softpin;
+
+ /**
+ * Size in bytes of this buffer and its relocation descendents.
+ *
+ * Used to avoid costly tree walking in
+ * drm_intel_bufmgr_check_aperture in the common case.
+ */
+ int reloc_tree_size;
+
+ /**
+ * Number of potential fence registers required by this buffer and its
+ * relocations.
+ */
+ int reloc_tree_fences;
+
+	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
+ bool mapped_cpu_write;
+};
+
+static unsigned int
+drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
+
+static unsigned int
+drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
+
+static int
+drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+
+static int
+drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
+ uint32_t tiling_mode,
+ uint32_t stride);
+
+static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
+ time_t time);
+
+static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
+
+static void drm_intel_gem_bo_free(drm_intel_bo *bo);
+
+static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
+{
+ return (drm_intel_bo_gem *)bo;
+}
+
+static unsigned long
+drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
+ uint32_t *tiling_mode)
+{
+ unsigned long min_size, max_size;
+ unsigned long i;
+
+ if (*tiling_mode == I915_TILING_NONE)
+ return size;
+
+	/* 965+ just needs multiples of the page size for tiling */
+ if (bufmgr_gem->gen >= 4)
+ return ROUND_UP_TO(size, 4096);
+
+ /* Older chips need powers of two, of at least 512k or 1M */
+ if (bufmgr_gem->gen == 3) {
+ min_size = 1024*1024;
+ max_size = 128*1024*1024;
+ } else {
+ min_size = 512*1024;
+ max_size = 64*1024*1024;
+ }
+
+ if (size > max_size) {
+ *tiling_mode = I915_TILING_NONE;
+ return size;
+ }
+
+ /* Do we need to allocate every page for the fence? */
+ if (bufmgr_gem->has_relaxed_fencing)
+ return ROUND_UP_TO(size, 4096);
+
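+	/* Round up to the next power-of-two step, starting from min_size. */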
+ for (i = min_size; i < size; i <<= 1)
+ ;
+
+ return i;
+}
+
+/*
+ * Round a given pitch up to the minimum required for X tiling on a
+ * given chip. We use 512 as the minimum to allow for a later tiling
+ * change.
+ */
+static unsigned long
+drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
+ unsigned long pitch, uint32_t *tiling_mode)
+{
+ unsigned long tile_width;
+ unsigned long i;
+
+ /* If untiled, then just align it so that we can do rendering
+ * to it with the 3D engine.
+ */
+ if (*tiling_mode == I915_TILING_NONE)
+ return ALIGN(pitch, 64);
+
+ if (*tiling_mode == I915_TILING_X
+ || (IS_915(bufmgr_gem->pci_device)
+ && *tiling_mode == I915_TILING_Y))
+ tile_width = 512;
+ else
+ tile_width = 128;
+
+ /* 965 is flexible */
+ if (bufmgr_gem->gen >= 4)
+ return ROUND_UP_TO(pitch, tile_width);
+
+ /* The older hardware has a maximum pitch of 8192 with tiled
+ * surfaces, so fallback to untiled if it's too large.
+ */
+ if (pitch > 8192) {
+ *tiling_mode = I915_TILING_NONE;
+ return ALIGN(pitch, 64);
+ }
+
+ /* Pre-965 needs power of two tile width */
+ for (i = tile_width; i < pitch; i <<= 1)
+ ;
+
+ return i;
+}
+
+static struct drm_intel_gem_bo_bucket *
+drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
+ unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < bufmgr_gem->num_buckets; i++) {
+ struct drm_intel_gem_bo_bucket *bucket =
+ &bufmgr_gem->cache_bucket[i];
+ if (bucket->size >= size) {
+ return bucket;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int i, j;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
+ DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
+ bo_gem->is_softpin ? "*" : "",
+ bo_gem->name);
+ continue;
+ }
+
+ for (j = 0; j < bo_gem->reloc_count; j++) {
+ drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
+ drm_intel_bo_gem *target_gem =
+ (drm_intel_bo_gem *) target_bo;
+
+ DBG("%2d: %d %s(%s)@0x%08x %08x -> "
+ "%d (%s)@0x%08x %08x + 0x%08x\n",
+ i,
+ bo_gem->gem_handle,
+ bo_gem->is_softpin ? "*" : "",
+ bo_gem->name,
+ upper_32_bits(bo_gem->relocs[j].offset),
+ lower_32_bits(bo_gem->relocs[j].offset),
+ target_gem->gem_handle,
+ target_gem->name,
+ upper_32_bits(target_bo->offset64),
+ lower_32_bits(target_bo->offset64),
+ bo_gem->relocs[j].delta);
+ }
+
+ for (j = 0; j < bo_gem->softpin_target_count; j++) {
+ drm_intel_bo *target_bo = bo_gem->softpin_target[j];
+ drm_intel_bo_gem *target_gem =
+ (drm_intel_bo_gem *) target_bo;
+ DBG("%2d: %d %s(%s) -> "
+ "%d *(%s)@0x%08x %08x\n",
+ i,
+ bo_gem->gem_handle,
+ bo_gem->is_softpin ? "*" : "",
+ bo_gem->name,
+ target_gem->gem_handle,
+ target_gem->name,
+ upper_32_bits(target_bo->offset64),
+ lower_32_bits(target_bo->offset64));
+ }
+ }
+}
+
+static inline void
+drm_intel_gem_bo_reference(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ atomic_inc(&bo_gem->refcount);
+}
+
+/**
+ * Adds the given buffer to the list of buffers to be validated (moved into the
+ * appropriate memory type) with the next batch submission.
+ *
+ * If a buffer is validated multiple times in a batch submission, it ends up
+ * with the intersection of the memory type flags and the union of the
+ * access flags.
+ */
+static void
+drm_intel_add_validate_buffer(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int index;
+
+ if (bo_gem->validate_index != -1)
+ return;
+
+ /* Extend the array of validation entries as necessary. */
+ if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+ int new_size = bufmgr_gem->exec_size * 2;
+
+ if (new_size == 0)
+ new_size = 5;
+
+ bufmgr_gem->exec_objects =
+ realloc(bufmgr_gem->exec_objects,
+ sizeof(*bufmgr_gem->exec_objects) * new_size);
+ bufmgr_gem->exec_bos =
+ realloc(bufmgr_gem->exec_bos,
+ sizeof(*bufmgr_gem->exec_bos) * new_size);
+ bufmgr_gem->exec_size = new_size;
+ }
+
+ index = bufmgr_gem->exec_count;
+ bo_gem->validate_index = index;
+ /* Fill in array entry */
+ bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
+ bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
+ bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
+ bufmgr_gem->exec_objects[index].alignment = bo->align;
+ bufmgr_gem->exec_objects[index].offset = 0;
+ bufmgr_gem->exec_bos[index] = bo;
+ bufmgr_gem->exec_count++;
+}
+
+static void
+drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ int index;
+ int flags = 0;
+
+ if (need_fence)
+ flags |= EXEC_OBJECT_NEEDS_FENCE;
+ if (bo_gem->use_48b_address_range)
+ flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ if (bo_gem->is_softpin)
+ flags |= EXEC_OBJECT_PINNED;
+
+ if (bo_gem->validate_index != -1) {
+ bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
+ return;
+ }
+
+ /* Extend the array of validation entries as necessary. */
+ if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+ int new_size = bufmgr_gem->exec_size * 2;
+
+ if (new_size == 0)
+ new_size = 5;
+
+ bufmgr_gem->exec2_objects =
+ realloc(bufmgr_gem->exec2_objects,
+ sizeof(*bufmgr_gem->exec2_objects) * new_size);
+ bufmgr_gem->exec_bos =
+ realloc(bufmgr_gem->exec_bos,
+ sizeof(*bufmgr_gem->exec_bos) * new_size);
+ bufmgr_gem->exec_size = new_size;
+ }
+
+ index = bufmgr_gem->exec_count;
+ bo_gem->validate_index = index;
+ /* Fill in array entry */
+ bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
+ bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
+ bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
+ bufmgr_gem->exec2_objects[index].alignment = bo->align;
+ bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ?
+ bo->offset64 : 0;
+ bufmgr_gem->exec_bos[index] = bo;
+ bufmgr_gem->exec2_objects[index].flags = flags;
+ bufmgr_gem->exec2_objects[index].rsvd1 = 0;
+ bufmgr_gem->exec2_objects[index].rsvd2 = 0;
+ bufmgr_gem->exec_count++;
+}
+
+#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
+ sizeof(uint32_t))
+
+static void
+drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem,
+ unsigned int alignment)
+{
+ unsigned int size;
+
+ assert(!bo_gem->used_as_reloc_target);
+
+	/* The older chipsets are far less flexible in terms of tiling,
+	 * and require tiled buffers to be size-aligned in the aperture.
+ * This means that in the worst possible case we will need a hole
+ * twice as large as the object in order for it to fit into the
+ * aperture. Optimal packing is for wimps.
+ */
+ size = bo_gem->bo.size;
+ if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
+ unsigned int min_size;
+
+ if (bufmgr_gem->has_relaxed_fencing) {
+ if (bufmgr_gem->gen == 3)
+ min_size = 1024*1024;
+ else
+ min_size = 512*1024;
+
+ while (min_size < size)
+ min_size *= 2;
+ } else
+ min_size = size;
+
+ /* Account for worst-case alignment. */
+ alignment = MAX2(alignment, min_size);
+ }
+
+ bo_gem->reloc_tree_size = size + alignment;
+}
+
+static int
+drm_intel_setup_reloc_list(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ unsigned int max_relocs = bufmgr_gem->max_relocs;
+
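+	/* Every relocation patches a 4-byte location in this buffer, so it
+	 * can hold at most size / 4 relocation entries.
+	 */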
+ if (bo->size / 4 < max_relocs)
+ max_relocs = bo->size / 4;
+
+ bo_gem->relocs = malloc(max_relocs *
+ sizeof(struct drm_i915_gem_relocation_entry));
+ bo_gem->reloc_target_info = malloc(max_relocs *
+ sizeof(drm_intel_reloc_target));
+ if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
+ bo_gem->has_error = true;
+
+ free (bo_gem->relocs);
+ bo_gem->relocs = NULL;
+
+ free (bo_gem->reloc_target_info);
+ bo_gem->reloc_target_info = NULL;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_busy(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_busy busy;
+ int ret;
+
+ if (bo_gem->reusable && bo_gem->idle)
+ return false;
+
+ memclear(busy);
+ busy.handle = bo_gem->gem_handle;
+
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ if (ret == 0) {
+ bo_gem->idle = !busy.busy;
+ return busy.busy;
+ } else {
+ return false;
+ }
+}
+
+static int
+drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem, int state)
+{
+ struct drm_i915_gem_madvise madv;
+
+ memclear(madv);
+ madv.handle = bo_gem->gem_handle;
+ madv.madv = state;
+ madv.retained = 1;
+ drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+
+ return madv.retained;
+}
+
+static int
+drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
+{
+ return drm_intel_gem_bo_madvise_internal
+ ((drm_intel_bufmgr_gem *) bo->bufmgr,
+ (drm_intel_bo_gem *) bo,
+ madv);
+}
+
+/* drop the oldest entries that have been purged by the kernel */
+static void
+drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
+ struct drm_intel_gem_bo_bucket *bucket)
+{
+ while (!DRMLISTEMPTY(&bucket->head)) {
+ drm_intel_bo_gem *bo_gem;
+
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.next, head);
+ if (drm_intel_gem_bo_madvise_internal
+ (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
+ break;
+
+ DRMLISTDEL(&bo_gem->head);
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ }
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned long flags,
+ uint32_t tiling_mode,
+ unsigned long stride,
+ unsigned int alignment)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ drm_intel_bo_gem *bo_gem;
+ unsigned int page_size = getpagesize();
+ int ret;
+ struct drm_intel_gem_bo_bucket *bucket;
+ bool alloc_from_cache;
+ unsigned long bo_size;
+ bool for_render = false;
+
+ if (flags & BO_ALLOC_FOR_RENDER)
+ for_render = true;
+
+ /* Round the allocated size up to a power of two number of pages. */
+ bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
+
+ /* If we don't have caching at this size, don't actually round the
+ * allocation up.
+ */
+ if (bucket == NULL) {
+ bo_size = size;
+ if (bo_size < page_size)
+ bo_size = page_size;
+ } else {
+ bo_size = bucket->size;
+ }
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ /* Get a buffer out of the cache if available */
+retry:
+ alloc_from_cache = false;
+ if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
+ if (for_render) {
+ /* Allocate new render-target BOs from the tail (MRU)
+ * of the list, as it will likely be hot in the GPU
+ * cache and in the aperture for us.
+ */
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.prev, head);
+ DRMLISTDEL(&bo_gem->head);
+ alloc_from_cache = true;
+ bo_gem->bo.align = alignment;
+ } else {
+ assert(alignment == 0);
+ /* For non-render-target BOs (where we're probably
+ * going to map it first thing in order to fill it
+ * with data), check if the last BO in the cache is
+ * unbusy, and only reuse in that case. Otherwise,
+ * allocating a new buffer is probably faster than
+ * waiting for the GPU to finish.
+ */
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.next, head);
+ if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
+ alloc_from_cache = true;
+ DRMLISTDEL(&bo_gem->head);
+ }
+ }
+
+ if (alloc_from_cache) {
+ if (!drm_intel_gem_bo_madvise_internal
+ (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
+ bucket);
+ goto retry;
+ }
+
+ if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
+ tiling_mode,
+ stride)) {
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ goto retry;
+ }
+ }
+ }
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ if (!alloc_from_cache) {
+ struct drm_i915_gem_create create;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ bo_gem->bo.size = bo_size;
+
+ memclear(create);
+ create.size = bo_size;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_CREATE,
+ &create);
+ bo_gem->gem_handle = create.handle;
+ bo_gem->bo.handle = bo_gem->gem_handle;
+ if (ret != 0) {
+ free(bo_gem);
+ return NULL;
+ }
+ bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->bo.align = alignment;
+
+ bo_gem->tiling_mode = I915_TILING_NONE;
+ bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ bo_gem->stride = 0;
+
+		/* drm_intel_gem_bo_free() calls DRMLISTDEL() on an uninitialized
+		   list (vma_list), so initialize the list heads here */
+ DRMINITLISTHEAD(&bo_gem->name_list);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
+ if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
+ tiling_mode,
+ stride)) {
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ return NULL;
+ }
+ }
+
+ bo_gem->name = name;
+ atomic_set(&bo_gem->refcount, 1);
+ bo_gem->validate_index = -1;
+ bo_gem->reloc_tree_fences = 0;
+ bo_gem->used_as_reloc_target = false;
+ bo_gem->has_error = false;
+ bo_gem->reusable = true;
+ bo_gem->use_48b_address_range = false;
+
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
+
+ DBG("bo_create: buf %d (%s) %ldb\n",
+ bo_gem->gem_handle, bo_gem->name, size);
+
+ return &bo_gem->bo;
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
+ BO_ALLOC_FOR_RENDER,
+ I915_TILING_NONE, 0,
+ alignment);
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
+ I915_TILING_NONE, 0, 0);
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
+ int x, int y, int cpp, uint32_t *tiling_mode,
+ unsigned long *pitch, unsigned long flags)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+ unsigned long size, stride;
+ uint32_t tiling;
+
+ do {
+ unsigned long aligned_y, height_alignment;
+
+ tiling = *tiling_mode;
+
+ /* If we're tiled, our allocations are in 8 or 32-row blocks,
+ * so failure to align our height means that we won't allocate
+ * enough pages.
+ *
+ * If we're untiled, we still have to align to 2 rows high
+ * because the data port accesses 2x2 blocks even if the
+ * bottom row isn't to be rendered, so failure to align means
+ * we could walk off the end of the GTT and fault. This is
+ * documented on 965, and may be the case on older chipsets
+ * too so we try to be careful.
+ */
+ aligned_y = y;
+ height_alignment = 2;
+
+ if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
+ height_alignment = 16;
+ else if (tiling == I915_TILING_X
+ || (IS_915(bufmgr_gem->pci_device)
+ && tiling == I915_TILING_Y))
+ height_alignment = 8;
+ else if (tiling == I915_TILING_Y)
+ height_alignment = 32;
+ aligned_y = ALIGN(y, height_alignment);
+
+ stride = x * cpp;
+ stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
+ size = stride * aligned_y;
+ size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
+ } while (*tiling_mode != tiling);
+ *pitch = stride;
+
+ if (tiling == I915_TILING_NONE)
+ stride = 0;
+
+ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
+ tiling, stride, 0);
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr,
+ uint32_t tiling_mode,
+ uint32_t stride,
+ unsigned long size,
+ unsigned long flags)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ drm_intel_bo_gem *bo_gem;
+ int ret;
+ struct drm_i915_gem_userptr userptr;
+
+ /* Tiling with userptr surfaces is not supported
+	 * on all hardware, so refuse it for the time being.
+ */
+ if (tiling_mode != I915_TILING_NONE)
+ return NULL;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ bo_gem->bo.size = size;
+
+ memclear(userptr);
+ userptr.user_ptr = (__u64)((unsigned long)addr);
+ userptr.user_size = size;
+ userptr.flags = flags;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_USERPTR,
+ &userptr);
+ if (ret != 0) {
+ DBG("bo_create_userptr: "
+ "ioctl failed with user ptr %p size 0x%lx, "
+ "user flags 0x%lx\n", addr, size, flags);
+ free(bo_gem);
+ return NULL;
+ }
+
+ bo_gem->gem_handle = userptr.handle;
+ bo_gem->bo.handle = bo_gem->gem_handle;
+ bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->is_userptr = true;
+ bo_gem->bo.virtual = addr;
+ /* Save the address provided by user */
+ bo_gem->user_virtual = addr;
+ bo_gem->tiling_mode = I915_TILING_NONE;
+ bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ bo_gem->stride = 0;
+
+ DRMINITLISTHEAD(&bo_gem->name_list);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
+
+ bo_gem->name = name;
+ atomic_set(&bo_gem->refcount, 1);
+ bo_gem->validate_index = -1;
+ bo_gem->reloc_tree_fences = 0;
+ bo_gem->used_as_reloc_target = false;
+ bo_gem->has_error = false;
+ bo_gem->reusable = false;
+ bo_gem->use_48b_address_range = false;
+
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
+
+ DBG("bo_create_userptr: "
+ "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
+ addr, bo_gem->gem_handle, bo_gem->name,
+ size, stride, tiling_mode);
+
+ return &bo_gem->bo;
+}
+
+static bool
+has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int ret;
+ void *ptr;
+ long pgsz;
+ struct drm_i915_gem_userptr userptr;
+
+ pgsz = sysconf(_SC_PAGESIZE);
+ assert(pgsz > 0);
+
+ ret = posix_memalign(&ptr, pgsz, pgsz);
+ if (ret) {
+ DBG("Failed to get a page (%ld) for userptr detection!\n",
+ pgsz);
+ return false;
+ }
+
+ memclear(userptr);
+ userptr.user_ptr = (__u64)(unsigned long)ptr;
+ userptr.user_size = pgsz;
+
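+	/* Probe with no flags first; if the kernel rejects that with ENODEV,
+	 * retry with I915_USERPTR_UNSYNCHRONIZED before giving up.
+	 */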
+retry:
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
+ if (ret) {
+ if (errno == ENODEV && userptr.flags == 0) {
+ userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
+ goto retry;
+ }
+ free(ptr);
+ return false;
+ }
+
+ /* We don't release the userptr bo here as we want to keep the
+ * kernel mm tracking alive for our lifetime. The first time we
+	 * create a userptr object the kernel has to install an mmu_notifier
+ * which is a heavyweight operation (e.g. it requires taking all
+ * mm_locks and stop_machine()).
+ */
+
+ bufmgr_gem->userptr_active.ptr = ptr;
+ bufmgr_gem->userptr_active.handle = userptr.handle;
+
+ return true;
+}
+
+static drm_intel_bo *
+check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr,
+ uint32_t tiling_mode,
+ uint32_t stride,
+ unsigned long size,
+ unsigned long flags)
+{
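+	/* First-call probe: detect kernel userptr support once, then rebind
+	 * bufmgr->bo_alloc_userptr so that later calls dispatch directly.
+	 */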
+ if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
+ bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
+ else
+ bufmgr->bo_alloc_userptr = NULL;
+
+ return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
+ tiling_mode, stride, size, flags);
+}
+
+/**
+ * Returns a drm_intel_bo wrapping the given buffer object handle.
+ *
+ * This can be used when one application needs to pass a buffer object
+ * to another.
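+ *
+ * A typical (hypothetical) exchange: the exporting process calls
+ * drm_intel_bo_flink() to obtain the global name, hands it to the other
+ * process out of band, and the importer wraps it with this function.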
+ */
+drm_intel_bo *
+drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ drm_intel_bo_gem *bo_gem;
+ int ret;
+ struct drm_gem_open open_arg;
+ struct drm_i915_gem_get_tiling get_tiling;
+ drmMMListHead *list;
+
+	/* At the moment most applications only have a few named BOs.
+ * For instance, in a DRI client only the render buffers passed
+ * between X and the client are named. And since X returns the
+ * alternating names for the front/back buffer a linear search
+ * provides a sufficiently fast match.
+ */
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ for (list = bufmgr_gem->named.next;
+ list != &bufmgr_gem->named;
+ list = list->next) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+ if (bo_gem->global_name == handle) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return &bo_gem->bo;
+ }
+ }
+
+ memclear(open_arg);
+ open_arg.name = handle;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_GEM_OPEN,
+ &open_arg);
+ if (ret != 0) {
+ DBG("Couldn't reference %s handle 0x%08x: %s\n",
+ name, handle, strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return NULL;
+ }
+ /* Now see if someone has used a prime handle to get this
+ * object from the kernel before by looking through the list
+	 * again for a matching gem_handle.
+ */
+ for (list = bufmgr_gem->named.next;
+ list != &bufmgr_gem->named;
+ list = list->next) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+ if (bo_gem->gem_handle == open_arg.handle) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return &bo_gem->bo;
+ }
+ }
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return NULL;
+ }
+
+ bo_gem->bo.size = open_arg.size;
+ bo_gem->bo.offset = 0;
+ bo_gem->bo.offset64 = 0;
+ bo_gem->bo.virtual = NULL;
+ bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->name = name;
+ atomic_set(&bo_gem->refcount, 1);
+ bo_gem->validate_index = -1;
+ bo_gem->gem_handle = open_arg.handle;
+ bo_gem->bo.handle = open_arg.handle;
+ bo_gem->global_name = handle;
+ bo_gem->reusable = false;
+ bo_gem->use_48b_address_range = false;
+
+ memclear(get_tiling);
+ get_tiling.handle = bo_gem->gem_handle;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_GET_TILING,
+ &get_tiling);
+ if (ret != 0) {
+ drm_intel_gem_bo_unreference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return NULL;
+ }
+ bo_gem->tiling_mode = get_tiling.tiling_mode;
+ bo_gem->swizzle_mode = get_tiling.swizzle_mode;
+ /* XXX stride is unknown */
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
+
+ DRMINITLISTHEAD(&bo_gem->vma_list);
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
+
+ return &bo_gem->bo;
+}
+
+static void
+drm_intel_gem_bo_free(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_gem_close close;
+ int ret;
+
+ DRMLISTDEL(&bo_gem->vma_list);
+ if (bo_gem->mem_virtual) {
+ VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
+ drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ bufmgr_gem->vma_count--;
+ }
+ if (bo_gem->gtt_virtual) {
+ drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ bufmgr_gem->vma_count--;
+ }
+
+ /* Close this object */
+ memclear(close);
+ close.handle = bo_gem->gem_handle;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
+ if (ret != 0) {
+ DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+ bo_gem->gem_handle, bo_gem->name, strerror(errno));
+ }
+ free(bo);
+}
+
+static void
+drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
+{
+#if HAVE_VALGRIND
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ if (bo_gem->mem_virtual)
+ VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
+
+ if (bo_gem->gtt_virtual)
+ VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
+#endif
+}
+
+/** Frees all cached buffers significantly older than @time. */
+static void
+drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
+{
+ int i;
+
+ if (bufmgr_gem->time == time)
+ return;
+
+ for (i = 0; i < bufmgr_gem->num_buckets; i++) {
+ struct drm_intel_gem_bo_bucket *bucket =
+ &bufmgr_gem->cache_bucket[i];
+
+ while (!DRMLISTEMPTY(&bucket->head)) {
+ drm_intel_bo_gem *bo_gem;
+
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.next, head);
+ if (time - bo_gem->free_time <= 1)
+ break;
+
+ DRMLISTDEL(&bo_gem->head);
+
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ }
+ }
+
+ bufmgr_gem->time = time;
+}
+
+static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int limit;
+
+ DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
+ bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
+
+ if (bufmgr_gem->vma_max < 0)
+ return;
+
+ /* We may need to evict a few entries in order to create new mmaps */
+ limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
+ if (limit < 0)
+ limit = 0;
+
+ while (bufmgr_gem->vma_count > limit) {
+ drm_intel_bo_gem *bo_gem;
+
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bufmgr_gem->vma_cache.next,
+ vma_list);
+ assert(bo_gem->map_count == 0);
+ DRMLISTDELINIT(&bo_gem->vma_list);
+
+ if (bo_gem->mem_virtual) {
+ drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ bo_gem->mem_virtual = NULL;
+ bufmgr_gem->vma_count--;
+ }
+ if (bo_gem->gtt_virtual) {
+ drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ bo_gem->gtt_virtual = NULL;
+ bufmgr_gem->vma_count--;
+ }
+ }
+}
+
+static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem)
+{
+ bufmgr_gem->vma_open--;
+ DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
+ if (bo_gem->mem_virtual)
+ bufmgr_gem->vma_count++;
+ if (bo_gem->gtt_virtual)
+ bufmgr_gem->vma_count++;
+ drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
+static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem)
+{
+ bufmgr_gem->vma_open++;
+ DRMLISTDEL(&bo_gem->vma_list);
+ if (bo_gem->mem_virtual)
+ bufmgr_gem->vma_count--;
+ if (bo_gem->gtt_virtual)
+ bufmgr_gem->vma_count--;
+ drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
+static void
+drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_intel_gem_bo_bucket *bucket;
+ int i;
+
+ /* Unreference all the target buffers */
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ if (bo_gem->reloc_target_info[i].bo != bo) {
+ drm_intel_gem_bo_unreference_locked_timed(bo_gem->
+ reloc_target_info[i].bo,
+ time);
+ }
+ }
+ for (i = 0; i < bo_gem->softpin_target_count; i++)
+ drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
+ time);
+ bo_gem->reloc_count = 0;
+ bo_gem->used_as_reloc_target = false;
+ bo_gem->softpin_target_count = 0;
+
+ DBG("bo_unreference final: %d (%s)\n",
+ bo_gem->gem_handle, bo_gem->name);
+
+ /* release memory associated with this object */
+ if (bo_gem->reloc_target_info) {
+ free(bo_gem->reloc_target_info);
+ bo_gem->reloc_target_info = NULL;
+ }
+ if (bo_gem->relocs) {
+ free(bo_gem->relocs);
+ bo_gem->relocs = NULL;
+ }
+ if (bo_gem->softpin_target) {
+ free(bo_gem->softpin_target);
+ bo_gem->softpin_target = NULL;
+ bo_gem->softpin_target_size = 0;
+ }
+
+ /* Clear any left-over mappings */
+ if (bo_gem->map_count) {
+ DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
+ bo_gem->map_count = 0;
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+ }
+
+ DRMLISTDEL(&bo_gem->name_list);
+
+ bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+ /* Put the buffer into our internal cache for reuse if we can. */
+ if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
+ drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
+ I915_MADV_DONTNEED)) {
+ bo_gem->free_time = time;
+
+ bo_gem->name = NULL;
+ bo_gem->validate_index = -1;
+
+ DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
+ } else {
+ drm_intel_gem_bo_free(bo);
+ }
+}
+
+static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
+ time_t time)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ assert(atomic_read(&bo_gem->refcount) > 0);
+ if (atomic_dec_and_test(&bo_gem->refcount))
+ drm_intel_gem_bo_unreference_final(bo, time);
+}
+
+static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ assert(atomic_read(&bo_gem->refcount) > 0);
+
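+	/* Fast path: with libdrm's xf86atomic.h semantics, atomic_add_unless()
+	 * decrements without taking the lock while the refcount stays above 1,
+	 * and returns true once the count may reach zero; the final decrement
+	 * is then redone under the bufmgr lock so teardown is serialized.
+	 */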
+ if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
+ drm_intel_bufmgr_gem *bufmgr_gem =
+ (drm_intel_bufmgr_gem *) bo->bufmgr;
+ struct timespec time;
+
+ clock_gettime(CLOCK_MONOTONIC, &time);
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ if (atomic_dec_and_test(&bo_gem->refcount)) {
+ drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+ drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+ }
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ }
+}
+
+static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ if (bo_gem->is_userptr) {
+ /* Return the same user ptr */
+ bo->virtual = bo_gem->user_virtual;
+ return 0;
+ }
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ if (bo_gem->map_count++ == 0)
+ drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+ if (!bo_gem->mem_virtual) {
+ struct drm_i915_gem_mmap mmap_arg;
+
+ DBG("bo_map: %d (%s), map_count=%d\n",
+ bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+ memclear(mmap_arg);
+ mmap_arg.handle = bo_gem->gem_handle;
+ mmap_arg.size = bo->size;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_MMAP,
+ &mmap_arg);
+ if (ret != 0) {
+ ret = -errno;
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ bo_gem->name, strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return ret;
+ }
+ VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
+ bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
+ }
+ DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+ bo_gem->mem_virtual);
+ bo->virtual = bo_gem->mem_virtual;
+
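+	/* Move the buffer into the CPU domain; the kernel flushes GPU caches
+	 * and waits for outstanding rendering as required.
+	 */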
+ memclear(set_domain);
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+ if (write_enable)
+ set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+ else
+ set_domain.write_domain = 0;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ if (ret != 0) {
+ DBG("%s:%d: Error setting to CPU domain %d: %s\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ strerror(errno));
+ }
+
+ if (write_enable)
+ bo_gem->mapped_cpu_write = true;
+
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+ VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return 0;
+}
+
+static int
+map_gtt(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int ret;
+
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
+ if (bo_gem->map_count++ == 0)
+ drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+ /* Get a mapping of the buffer if we haven't before. */
+ if (bo_gem->gtt_virtual == NULL) {
+ struct drm_i915_gem_mmap_gtt mmap_arg;
+
+ DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
+ bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+ memclear(mmap_arg);
+ mmap_arg.handle = bo_gem->gem_handle;
+
+ /* Get the fake offset back... */
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_MMAP_GTT,
+ &mmap_arg);
+ if (ret != 0) {
+ ret = -errno;
+ DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name,
+ strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ return ret;
+ }
+
+ /* and mmap it */
+ bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr_gem->fd,
+ mmap_arg.offset);
+ if (bo_gem->gtt_virtual == MAP_FAILED) {
+ bo_gem->gtt_virtual = NULL;
+ ret = -errno;
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name,
+ strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ return ret;
+ }
+ }
+
+ bo->virtual = bo_gem->gtt_virtual;
+
+ DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+ bo_gem->gtt_virtual);
+
+ return 0;
+}
+
+int
+drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ ret = map_gtt(bo);
+ if (ret) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return ret;
+ }
+
+ /* Now move it to the GTT domain so that the GPU and CPU
+ * caches are flushed and the GPU isn't actively using the
+ * buffer.
+ *
+ * The pagefault handler does this domain change for us when
+ * it has unbound the BO from the GTT, but it's up to us to
+ * tell it when we're about to use things if we had done
+ * rendering and it still happens to be bound to the GTT.
+ */
+ memclear(set_domain);
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ if (ret != 0) {
+ DBG("%s:%d: Error setting domain %d: %s\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ strerror(errno));
+ }
+
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+ VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return 0;
+}
+
+/**
+ * Performs a mapping of the buffer object like the normal GTT
+ * mapping, but avoids waiting for the GPU to be done reading from or
+ * rendering to the buffer.
+ *
+ * This is used in the implementation of GL_ARB_map_buffer_range: The
+ * user asks to create a buffer, then does a mapping, fills some
+ * space, runs a drawing command, then asks to map it again without
+ * synchronizing because it guarantees that it won't write over the
+ * data that the GPU is busy using (or, more specifically, that if it
+ * does write over the data, it acknowledges that rendering is
+ * undefined).
+ */
+
+int
+drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+#ifdef HAVE_VALGRIND
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+#endif
+ int ret;
+
+ /* If the CPU cache isn't coherent with the GTT, then use a
+ * regular synchronized mapping. The problem is that we don't
+ * track where the buffer was last used on the CPU side in
+ * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
+ * we would potentially corrupt the buffer even when the user
+ * does reasonable things.
+ */
+ if (!bufmgr_gem->has_llc)
+ return drm_intel_gem_bo_map_gtt(bo);
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ ret = map_gtt(bo);
+ if (ret == 0) {
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+ VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
+ }
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return ret;
+}
+
+static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int ret = 0;
+
+ if (bo == NULL)
+ return 0;
+
+ if (bo_gem->is_userptr)
+ return 0;
+
+ bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ if (bo_gem->map_count <= 0) {
+ DBG("attempted to unmap an unmapped bo\n");
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ /* Preserve the old behaviour of just treating this as a
+ * no-op rather than reporting the error.
+ */
+ return 0;
+ }
+
+ if (bo_gem->mapped_cpu_write) {
+ struct drm_i915_gem_sw_finish sw_finish;
+
+ /* Cause a flush to happen if the buffer's pinned for
+ * scanout, so the results show up in a timely manner.
+ * Unlike GTT set domains, this only does work if the
+		 * buffer is scanout-related.
+ */
+ memclear(sw_finish);
+ sw_finish.handle = bo_gem->gem_handle;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SW_FINISH,
+ &sw_finish);
+ ret = ret == -1 ? -errno : 0;
+
+ bo_gem->mapped_cpu_write = false;
+ }
+
+	/* We need to unmap after every invocation, as we cannot track
+	 * an open vma for every bo: that would exhaust the system
+	 * limits and cause later failures.
+ */
+ if (--bo_gem->map_count == 0) {
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+ bo->virtual = NULL;
+ }
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return ret;
+}
+
+int
+drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+{
+ return drm_intel_gem_bo_unmap(bo);
+}
+
+static int
+drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_pwrite pwrite;
+ int ret;
+
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
+ memclear(pwrite);
+ pwrite.handle = bo_gem->gem_handle;
+ pwrite.offset = offset;
+ pwrite.size = size;
+ pwrite.data_ptr = (uint64_t) (uintptr_t) data;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_PWRITE,
+ &pwrite);
+ if (ret != 0) {
+ ret = -errno;
+ DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
+ (int)size, strerror(errno));
+ }
+
+ return ret;
+}
+
+static int
+drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
+ int ret;
+
+ memclear(get_pipe_from_crtc_id);
+ get_pipe_from_crtc_id.crtc_id = crtc_id;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
+ &get_pipe_from_crtc_id);
+ if (ret != 0) {
+ /* We return -1 here to signal that we don't
+ * know which pipe is associated with this crtc.
+ * This lets the caller know that this information
+ * isn't available; using the wrong pipe for
+ * vblank waiting can cause the chipset to lock up
+ */
+ return -1;
+ }
+
+ return get_pipe_from_crtc_id.pipe;
+}
+
+static int
+drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_pread pread;
+ int ret;
+
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
+ memclear(pread);
+ pread.handle = bo_gem->gem_handle;
+ pread.offset = offset;
+ pread.size = size;
+ pread.data_ptr = (uint64_t) (uintptr_t) data;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_PREAD,
+ &pread);
+ if (ret != 0) {
+ ret = -errno;
+ DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
+ (int)size, strerror(errno));
+ }
+
+ return ret;
+}
+
+/** Waits for all GPU rendering with the object to have completed. */
+static void
+drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
+{
+ drm_intel_gem_bo_start_gtt_access(bo, 1);
+}
+
+/**
+ * Waits on a BO for the given amount of time.
+ *
+ * @bo: buffer object to wait for
+ * @timeout_ns: amount of time to wait in nanoseconds.
+ * If value is less than 0, an infinite wait will occur.
+ *
+ * Returns 0 if the wait was successful, i.e. the last batch referencing the
+ * object has completed within the allotted time. Otherwise some negative return
+ * value describes the error. Of particular interest is -ETIME when the wait has
+ * failed to yield the desired result.
+ *
+ * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
+ * the operation to give up after a certain amount of time. Another subtle
+ * difference is the internal locking semantics are different (this variant does
+ * not hold the lock for the duration of the wait). This makes the wait subject
+ * to a larger userspace race window.
+ *
+ * The implementation shall wait until the object is no longer actively
+ * referenced within a batch buffer at the time of the call. The wait will
+ * not guarantee that the buffer is not re-issued via another thread or a
+ * flinked handle. Userspace must make sure this race does not occur if
+ * such precision is important.
+ *
+ * Note that some kernels have broken the promise of an infinite wait for
+ * negative values; upgrade to the latest stable kernel if this is the case.
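+ *
+ * Hypothetical usage sketch, waiting up to one second (handle_still_busy()
+ * stands in for caller policy):
+ *
+ *   if (drm_intel_gem_bo_wait(bo, 1000000000ll) == -ETIME)
+ *           handle_still_busy(bo);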
+ */
+int
+drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_wait wait;
+ int ret;
+
+ if (!bufmgr_gem->has_wait_timeout) {
+ DBG("%s:%d: Timed wait is not supported. Falling back to "
+ "infinite wait\n", __FILE__, __LINE__);
+ if (timeout_ns) {
+ drm_intel_gem_bo_wait_rendering(bo);
+ return 0;
+ } else {
+ return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
+ }
+ }
+
+ memclear(wait);
+ wait.bo_handle = bo_gem->gem_handle;
+ wait.timeout_ns = timeout_ns;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+ if (ret == -1)
+ return -errno;
+
+ return ret;
+}
+
+/**
+ * Sets the object to the GTT read and possibly write domain, used by the X
+ * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
+ *
+ * In combination with drm_intel_gem_bo_pin() and manual fence management, we
+ * can do tiled pixmaps this way.
+ */
+void
+drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ memclear(set_domain);
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ if (ret != 0) {
+ DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ set_domain.read_domains, set_domain.write_domain,
+ strerror(errno));
+ }
+}
+
+static void
+drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ struct drm_gem_close close_bo;
+ int i, ret;
+
+ free(bufmgr_gem->exec2_objects);
+ free(bufmgr_gem->exec_objects);
+ free(bufmgr_gem->exec_bos);
+
+ pthread_mutex_destroy(&bufmgr_gem->lock);
+
+ /* Free any cached buffer objects we were going to reuse */
+ for (i = 0; i < bufmgr_gem->num_buckets; i++) {
+ struct drm_intel_gem_bo_bucket *bucket =
+ &bufmgr_gem->cache_bucket[i];
+ drm_intel_bo_gem *bo_gem;
+
+ while (!DRMLISTEMPTY(&bucket->head)) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.next, head);
+ DRMLISTDEL(&bo_gem->head);
+
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ }
+ }
+
+ /* Release userptr bo kept hanging around for optimisation. */
+ if (bufmgr_gem->userptr_active.ptr) {
+ memclear(close_bo);
+ close_bo.handle = bufmgr_gem->userptr_active.handle;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
+ free(bufmgr_gem->userptr_active.ptr);
+ if (ret)
+ fprintf(stderr,
+ "Failed to release test userptr object! (%d) "
+ "i915 kernel driver may not be sane!\n", errno);
+ }
+
+ free(bufmgr);
+}
+
+/**
+ * Adds the target buffer to the validation list and adds the relocation
+ * to the reloc_buffer's relocation list.
+ *
+ * The relocation entry at the given offset must already contain the
+ * precomputed relocation value, because the kernel will optimize out
+ * the relocation entry write when the buffer hasn't moved from the
+ * last known offset in target_bo.
+ */
+static int
+do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain,
+ bool need_fence)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
+ bool fenced_command;
+
+ if (bo_gem->has_error)
+ return -ENOMEM;
+
+ if (target_bo_gem->has_error) {
+ bo_gem->has_error = true;
+ return -ENOMEM;
+ }
+
+ /* We never use HW fences for rendering on 965+ */
+ if (bufmgr_gem->gen >= 4)
+ need_fence = false;
+
+ fenced_command = need_fence;
+ if (target_bo_gem->tiling_mode == I915_TILING_NONE)
+ need_fence = false;
+
+ /* Create a new relocation list if needed */
+ if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
+ return -ENOMEM;
+
+ /* Check overflow */
+ assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
+
+ /* Check args */
+ assert(offset <= bo->size - 4);
+ assert((write_domain & (write_domain - 1)) == 0);
+
+ /* An object needing a fence is a tiled buffer, so it won't have
+ * relocs to other buffers.
+ */
+ if (need_fence) {
+ assert(target_bo_gem->reloc_count == 0);
+ target_bo_gem->reloc_tree_fences = 1;
+ }
+
+ /* Make sure that we're not adding a reloc to something whose size has
+ * already been accounted for.
+ */
+ assert(!bo_gem->used_as_reloc_target);
+ if (target_bo_gem != bo_gem) {
+ target_bo_gem->used_as_reloc_target = true;
+ bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
+ bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
+ }
+
+ bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
+ if (target_bo != bo)
+ drm_intel_gem_bo_reference(target_bo);
+ if (fenced_command)
+ bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
+ DRM_INTEL_RELOC_FENCE;
+ else
+ bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
+
+ bo_gem->relocs[bo_gem->reloc_count].offset = offset;
+ bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
+ bo_gem->relocs[bo_gem->reloc_count].target_handle =
+ target_bo_gem->gem_handle;
+ bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
+ bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
+ bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
+ bo_gem->reloc_count++;
+
+ return 0;
+}
+
+static void
+drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ bo_gem->use_48b_address_range = enable;
+}
+
+static int
+drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
+ if (bo_gem->has_error)
+ return -ENOMEM;
+
+ if (target_bo_gem->has_error) {
+ bo_gem->has_error = true;
+ return -ENOMEM;
+ }
+
+ if (!target_bo_gem->is_softpin)
+ return -EINVAL;
+ if (target_bo_gem == bo_gem)
+ return -EINVAL;
+
+ if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
+ int new_size = bo_gem->softpin_target_size * 2;
+ if (new_size == 0)
+ new_size = bufmgr_gem->max_relocs;
+
+ bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
+ sizeof(drm_intel_bo *));
+ if (!bo_gem->softpin_target)
+ return -ENOMEM;
+
+ bo_gem->softpin_target_size = new_size;
+ }
+ bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
+ drm_intel_gem_bo_reference(target_bo);
+ bo_gem->softpin_target_count++;
+
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
+
+ if (target_bo_gem->is_softpin)
+ return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
+ else
+ return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
+ read_domains, write_domain,
+ !bufmgr_gem->fenced_relocs);
+}
+
+static int
+drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo,
+ uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
+ read_domains, write_domain, true);
+}
+
+int
+drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ return bo_gem->reloc_count;
+}
+
+/**
+ * Removes existing relocation entries in the BO after "start".
+ *
+ * This allows a user to avoid a two-step process for state setup with
+ * counting up all the buffer objects and doing a
+ * drm_intel_bufmgr_check_aperture_space() before emitting any of the
+ * relocations for the state setup. Instead, save the state of the
+ * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
+ * state, and then check if it still fits in the aperture.
+ *
+ * Any further drm_intel_bufmgr_check_aperture_space() queries
+ * involving this buffer in the tree are undefined after this call.
+ *
+ * This also removes all softpinned targets being referenced by the BO.
+ */
+void
+drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+ struct timespec time;
+
+ clock_gettime(CLOCK_MONOTONIC, &time);
+
+ assert(bo_gem->reloc_count >= start);
+
+ /* Unreference the cleared target buffers */
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ for (i = start; i < bo_gem->reloc_count; i++) {
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
+ if (&target_bo_gem->bo != bo) {
+ bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
+ drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
+ time.tv_sec);
+ }
+ }
+ bo_gem->reloc_count = start;
+
+ for (i = 0; i < bo_gem->softpin_target_count; i++) {
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
+ drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
+ }
+ bo_gem->softpin_target_count = 0;
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
+/**
+ * Walk the tree of relocations rooted at BO and accumulate the list of
+ * validations to be performed and update the relocation buffers with
+ * index values into the validation list.
+ */
+static void
+drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+
+ if (bo_gem->relocs == NULL)
+ return;
+
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
+
+ if (target_bo == bo)
+ continue;
+
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+
+ /* Continue walking the tree depth-first. */
+ drm_intel_gem_bo_process_reloc(target_bo);
+
+ /* Add the target to the validate list */
+ drm_intel_add_validate_buffer(target_bo);
+ }
+}
+
+static void
+drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ int i;
+
+ if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
+ return;
+
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
+ int need_fence;
+
+ if (target_bo == bo)
+ continue;
+
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+
+ /* Continue walking the tree depth-first. */
+ drm_intel_gem_bo_process_reloc2(target_bo);
+
+ need_fence = (bo_gem->reloc_target_info[i].flags &
+ DRM_INTEL_RELOC_FENCE);
+
+ /* Add the target to the validate list */
+ drm_intel_add_validate_buffer2(target_bo, need_fence);
+ }
+
+ for (i = 0; i < bo_gem->softpin_target_count; i++) {
+ drm_intel_bo *target_bo = bo_gem->softpin_target[i];
+
+ if (target_bo == bo)
+ continue;
+
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+ drm_intel_gem_bo_process_reloc2(target_bo);
+ drm_intel_add_validate_buffer2(target_bo, false);
+ }
+}
+
+static void
+drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int i;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ /* Update the buffer offset */
+ if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
+ DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
+ bo_gem->gem_handle, bo_gem->name,
+ upper_32_bits(bo->offset64),
+ lower_32_bits(bo->offset64),
+ upper_32_bits(bufmgr_gem->exec_objects[i].offset),
+ lower_32_bits(bufmgr_gem->exec_objects[i].offset));
+ bo->offset64 = bufmgr_gem->exec_objects[i].offset;
+ bo->offset = bufmgr_gem->exec_objects[i].offset;
+ }
+ }
+}
+
+static void
+drm_intel_update_buffer_offsets2(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int i;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+
+ /* Update the buffer offset */
+ if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
+		/* If we see a softpinned object here it means that the
+		 * kernel has relocated our object, which indicates a
+		 * programming error.
+		 */
+ assert(!bo_gem->is_softpin);
+ DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
+ bo_gem->gem_handle, bo_gem->name,
+ upper_32_bits(bo->offset64),
+ lower_32_bits(bo->offset64),
+ upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
+ lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
+ bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
+ bo->offset = bufmgr_gem->exec2_objects[i].offset;
+ }
+ }
+}
+
+/* AUB dumping was removed from libdrm; this is a no-op stub kept so
+ * the exported symbol remains (see the deprecation notice in
+ * drm_intel_bufmgr_gem_set_aub_dump() below).
+ */
+void
+drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
+ int x1, int y1, int width, int height,
+ enum aub_dump_bmp_format format,
+ int pitch, int offset)
+{
+}
+
+static int
+drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ struct drm_i915_gem_execbuffer execbuf;
+ int ret, i;
+
+ if (to_bo_gem(bo)->has_error)
+ return -ENOMEM;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ /* Update indices and set up the validate list. */
+ drm_intel_gem_bo_process_reloc(bo);
+
+ /* Add the batch buffer to the validation list. There are no
+ * relocations pointing to it.
+ */
+ drm_intel_add_validate_buffer(bo);
+
+ memclear(execbuf);
+ execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
+ execbuf.buffer_count = bufmgr_gem->exec_count;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = used;
+ execbuf.cliprects_ptr = (uintptr_t) cliprects;
+ execbuf.num_cliprects = num_cliprects;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = DR4;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_EXECBUFFER,
+ &execbuf);
+ if (ret != 0) {
+ ret = -errno;
+ if (errno == ENOSPC) {
+ DBG("Execbuffer fails to pin. "
+ "Estimate: %u. Actual: %u. Available: %u\n",
+			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
+							       bufmgr_gem->exec_count),
+			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
+							      bufmgr_gem->exec_count),
+ (unsigned int)bufmgr_gem->gtt_size);
+ }
+ }
+ drm_intel_update_buffer_offsets(bufmgr_gem);
+
+ if (bufmgr_gem->bufmgr.debug)
+ drm_intel_gem_dump_validation_list(bufmgr_gem);
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
+
+ bo_gem->idle = false;
+
+ /* Disconnect the buffer from the validate list */
+ bo_gem->validate_index = -1;
+ bufmgr_gem->exec_bos[i] = NULL;
+ }
+ bufmgr_gem->exec_count = 0;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return ret;
+}
+
+static int
+do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
+ drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
+ unsigned int flags)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ int ret = 0;
+ int i;
+
+ if (to_bo_gem(bo)->has_error)
+ return -ENOMEM;
+
+ switch (flags & 0x7) {
+ default:
+ return -EINVAL;
+ case I915_EXEC_BLT:
+ if (!bufmgr_gem->has_blt)
+ return -EINVAL;
+ break;
+ case I915_EXEC_BSD:
+ if (!bufmgr_gem->has_bsd)
+ return -EINVAL;
+ break;
+ case I915_EXEC_VEBOX:
+ if (!bufmgr_gem->has_vebox)
+ return -EINVAL;
+ break;
+ case I915_EXEC_RENDER:
+ case I915_EXEC_DEFAULT:
+ break;
+ }
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ /* Update indices and set up the validate list. */
+ drm_intel_gem_bo_process_reloc2(bo);
+
+ /* Add the batch buffer to the validation list. There are no relocations
+ * pointing to it.
+ */
+ drm_intel_add_validate_buffer2(bo, 0);
+
+ memclear(execbuf);
+ execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
+ execbuf.buffer_count = bufmgr_gem->exec_count;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = used;
+ execbuf.cliprects_ptr = (uintptr_t)cliprects;
+ execbuf.num_cliprects = num_cliprects;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = DR4;
+ execbuf.flags = flags;
+ if (ctx == NULL)
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ else
+ i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
+ execbuf.rsvd2 = 0;
+
+ if (bufmgr_gem->no_exec)
+ goto skip_execution;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_EXECBUFFER2,
+ &execbuf);
+ if (ret != 0) {
+ ret = -errno;
+ if (ret == -ENOSPC) {
+ DBG("Execbuffer fails to pin. "
+ "Estimate: %u. Actual: %u. Available: %u\n",
+ drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->exec_count),
+ drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->exec_count),
+ (unsigned int) bufmgr_gem->gtt_size);
+ }
+ }
+ drm_intel_update_buffer_offsets2(bufmgr_gem);
+
+skip_execution:
+ if (bufmgr_gem->bufmgr.debug)
+ drm_intel_gem_dump_validation_list(bufmgr_gem);
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
+
+ bo_gem->idle = false;
+
+ /* Disconnect the buffer from the validate list */
+ bo_gem->validate_index = -1;
+ bufmgr_gem->exec_bos[i] = NULL;
+ }
+ bufmgr_gem->exec_count = 0;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return ret;
+}
+
+static int
+drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4)
+{
+ return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
+ I915_EXEC_RENDER);
+}
+
+static int
+drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
+ unsigned int flags)
+{
+ return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
+ flags);
+}
+
+int
+drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
+ int used, unsigned int flags)
+{
+ return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
+}
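+
+/*
+ * Editorial sketch (illustrative): selecting a ring through the public
+ * wrappers above. "batch" and "used" are assumed to come from the
+ * caller's batchbuffer code; error handling elided.
+ *
+ *	ret = drm_intel_bo_mrb_exec(batch, used, NULL, 0, 0,
+ *				    I915_EXEC_BLT);
+ *
+ * do_exec2() rejects I915_EXEC_BSD/BLT/VEBOX when the kernel did not
+ * advertise the corresponding ring, so callers should be prepared for
+ * -EINVAL on older kernels.
+ */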
+
+static int
+drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_pin pin;
+ int ret;
+
+ memclear(pin);
+ pin.handle = bo_gem->gem_handle;
+ pin.alignment = alignment;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_PIN,
+ &pin);
+ if (ret != 0)
+ return -errno;
+
+ bo->offset64 = pin.offset;
+ bo->offset = pin.offset;
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_unpin(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_unpin unpin;
+ int ret;
+
+ memclear(unpin);
+ unpin.handle = bo_gem->gem_handle;
+
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
+ if (ret != 0)
+ return -errno;
+
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
+ uint32_t tiling_mode,
+ uint32_t stride)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_tiling set_tiling;
+ int ret;
+
+ if (bo_gem->global_name == 0 &&
+ tiling_mode == bo_gem->tiling_mode &&
+ stride == bo_gem->stride)
+ return 0;
+
+ memset(&set_tiling, 0, sizeof(set_tiling));
+ do {
+		/* set_tiling is slightly broken and overwrites the
+		 * input on the error path, so we have to open-code
+		 * drmIoctl.
+		 */
+ set_tiling.handle = bo_gem->gem_handle;
+ set_tiling.tiling_mode = tiling_mode;
+ set_tiling.stride = stride;
+
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_TILING,
+ &set_tiling);
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+ if (ret == -1)
+ return -errno;
+
+ bo_gem->tiling_mode = set_tiling.tiling_mode;
+ bo_gem->swizzle_mode = set_tiling.swizzle_mode;
+ bo_gem->stride = set_tiling.stride;
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int ret;
+
+	/* Tiling with userptr surfaces is not supported
+	 * on all hardware, so refuse it for the time being.
+	 */
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
+ /* Linear buffers have no stride. By ensuring that we only ever use
+ * stride 0 with linear buffers, we simplify our code.
+ */
+ if (*tiling_mode == I915_TILING_NONE)
+ stride = 0;
+
+ ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
+ if (ret == 0)
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
+
+ *tiling_mode = bo_gem->tiling_mode;
+ return ret;
+}
+
+static int
+drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ *tiling_mode = bo_gem->tiling_mode;
+ *swizzle_mode = bo_gem->swizzle_mode;
+ return 0;
+}
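+
+/*
+ * Editorial sketch (illustrative): requesting X tiling and reading
+ * back what the kernel actually granted. "bo" and the caller-computed
+ * "pitch" are assumptions; since the request may be downgraded, the
+ * caller must re-check the tiling mode afterwards.
+ *
+ *	uint32_t tiling = I915_TILING_X, swizzle;
+ *
+ *	drm_intel_bo_set_tiling(bo, &tiling, pitch);
+ *	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
+ *	if (tiling != I915_TILING_X)
+ *		... fall back to a linear layout ...
+ */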
+
+static int
+drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ bo_gem->is_softpin = true;
+ bo->offset64 = offset;
+ bo->offset = offset;
+ return 0;
+}
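+
+/*
+ * Editorial sketch (illustrative): pinning a buffer at a caller-managed
+ * GPU virtual address before execbuf. The address is made up; the
+ * caller owns the address-space layout. The public wrapper
+ * drm_intel_bo_set_softpin_offset() returns -ENODEV when this hook was
+ * not installed (kernel without I915_PARAM_HAS_EXEC_SOFTPIN).
+ *
+ *	uint64_t gpu_addr = 0x100000;
+ *
+ *	ret = drm_intel_bo_set_softpin_offset(bo, gpu_addr);
+ *
+ * Once softpinned, the object must never be seen to move; see the
+ * assert in drm_intel_update_buffer_offsets2() above.
+ */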
+
+drm_intel_bo *
+drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ int ret;
+ uint32_t handle;
+ drm_intel_bo_gem *bo_gem;
+ struct drm_i915_gem_get_tiling get_tiling;
+ drmMMListHead *list;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
+ if (ret) {
+ DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return NULL;
+ }
+
+ /*
+ * See if the kernel has already returned this buffer to us. Just as
+ * for named buffers, we must not create two bo's pointing at the same
+	 * kernel object.
+ */
+ for (list = bufmgr_gem->named.next;
+ list != &bufmgr_gem->named;
+ list = list->next) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+ if (bo_gem->gem_handle == handle) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return &bo_gem->bo;
+ }
+ }
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return NULL;
+ }
+ /* Determine size of bo. The fd-to-handle ioctl really should
+ * return the size, but it doesn't. If we have kernel 3.12 or
+ * later, we can lseek on the prime fd to get the size. Older
+ * kernels will just fail, in which case we fall back to the
+	 * provided (estimated or guessed) size. */
+ ret = lseek(prime_fd, 0, SEEK_END);
+ if (ret != -1)
+ bo_gem->bo.size = ret;
+ else
+ bo_gem->bo.size = size;
+
+ bo_gem->bo.handle = handle;
+ bo_gem->bo.bufmgr = bufmgr;
+
+ bo_gem->gem_handle = handle;
+
+ atomic_set(&bo_gem->refcount, 1);
+
+ bo_gem->name = "prime";
+ bo_gem->validate_index = -1;
+ bo_gem->reloc_tree_fences = 0;
+ bo_gem->used_as_reloc_target = false;
+ bo_gem->has_error = false;
+ bo_gem->reusable = false;
+ bo_gem->use_48b_address_range = false;
+
+ DRMINITLISTHEAD(&bo_gem->vma_list);
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ memclear(get_tiling);
+ get_tiling.handle = bo_gem->gem_handle;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_GET_TILING,
+ &get_tiling);
+ if (ret != 0) {
+ DBG("create_from_prime: failed to get tiling: %s\n", strerror(errno));
+ drm_intel_gem_bo_unreference(&bo_gem->bo);
+ return NULL;
+ }
+ bo_gem->tiling_mode = get_tiling.tiling_mode;
+ bo_gem->swizzle_mode = get_tiling.swizzle_mode;
+ /* XXX stride is unknown */
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
+
+ return &bo_gem->bo;
+}
+
+int
+drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ if (DRMLISTEMPTY(&bo_gem->name_list))
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
+ DRM_CLOEXEC, prime_fd) != 0)
+ return -errno;
+
+ bo_gem->reusable = false;
+
+ return 0;
+}
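+
+/*
+ * Editorial sketch (illustrative): sharing a buffer between two bufmgr
+ * instances via PRIME. "other_bufmgr" is an assumption; the size
+ * argument only matters as a fallback when lseek() on the fd is
+ * unsupported (pre-3.12 kernels). Error handling elided.
+ *
+ *	int fd;
+ *	drm_intel_bo *imported;
+ *
+ *	drm_intel_bo_gem_export_to_prime(bo, &fd);
+ *	imported = drm_intel_bo_gem_create_from_prime(other_bufmgr, fd,
+ *						      bo->size);
+ *	close(fd);
+ */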
+
+static int
+drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int ret;
+
+ if (!bo_gem->global_name) {
+ struct drm_gem_flink flink;
+
+ memclear(flink);
+ flink.handle = bo_gem->gem_handle;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (ret != 0) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return -errno;
+ }
+
+ bo_gem->global_name = flink.name;
+ bo_gem->reusable = false;
+
+ if (DRMLISTEMPTY(&bo_gem->name_list))
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ }
+
+ *name = bo_gem->global_name;
+ return 0;
+}
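+
+/*
+ * Editorial sketch (illustrative): publishing a flink name and
+ * re-opening it, typically from another process. flink names are
+ * global and guessable, so PRIME fds (above) are preferred for new
+ * code.
+ *
+ *	uint32_t name;
+ *
+ *	drm_intel_bo_flink(bo, &name);
+ *	bo2 = drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
+ */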
+
+/**
+ * Enables unlimited caching of buffer objects for reuse.
+ *
+ * This is potentially very memory expensive, as the cache at each bucket
+ * size is only bounded by how many buffers of that size we've managed to have
+ * in flight at once.
+ */
+void
+drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+
+ bufmgr_gem->bo_reuse = true;
+}
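+
+/*
+ * Editorial note: reuse is off by default, so a typical driver enables
+ * it once, right after creating the bufmgr:
+ *
+ *	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ */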
+
+/**
+ * Enable use of the fenced reloc type.
+ *
+ * New code should enable this to avoid unnecessary fence register
+ * allocation. If this option is not enabled, all relocs will have a
+ * fence register allocated.
+ */
+void
+drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
+ bufmgr_gem->fenced_relocs = true;
+}
+
+/**
+ * Return the additional aperture space required by the tree of buffer objects
+ * rooted at bo.
+ */
+static int
+drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+ int total = 0;
+
+ if (bo == NULL || bo_gem->included_in_check_aperture)
+ return 0;
+
+ total += bo->size;
+ bo_gem->included_in_check_aperture = true;
+
+ for (i = 0; i < bo_gem->reloc_count; i++)
+ total +=
+ drm_intel_gem_bo_get_aperture_space(bo_gem->
+ reloc_target_info[i].bo);
+
+ return total;
+}
+
+/**
+ * Count the number of buffers in this list that need a fence reg
+ *
+ * If the count is greater than the number of available regs, we'll have
+ * to ask the caller to resubmit a batch with fewer tiled buffers.
+ *
+ * This function over-counts if the same buffer is used multiple times.
+ */
+static unsigned int
+drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
+{
+ int i;
+ unsigned int total = 0;
+
+ for (i = 0; i < count; i++) {
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
+
+ if (bo_gem == NULL)
+ continue;
+
+ total += bo_gem->reloc_tree_fences;
+ }
+ return total;
+}
+
+/**
+ * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
+ * for the next drm_intel_bufmgr_check_aperture_space() call.
+ */
+static void
+drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+
+ if (bo == NULL || !bo_gem->included_in_check_aperture)
+ return;
+
+ bo_gem->included_in_check_aperture = false;
+
+ for (i = 0; i < bo_gem->reloc_count; i++)
+ drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
+ reloc_target_info[i].bo);
+}
+
+/**
+ * Return a conservative estimate for the amount of aperture required
+ * for a collection of buffers. This may double-count some buffers.
+ */
+static unsigned int
+drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
+{
+ int i;
+ unsigned int total = 0;
+
+ for (i = 0; i < count; i++) {
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
+ if (bo_gem != NULL)
+ total += bo_gem->reloc_tree_size;
+ }
+ return total;
+}
+
+/**
+ * Return the amount of aperture needed for a collection of buffers.
+ * This avoids double counting any buffers, at the cost of looking
+ * at every buffer in the set.
+ */
+static unsigned int
+drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
+{
+ int i;
+ unsigned int total = 0;
+
+ for (i = 0; i < count; i++) {
+ total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
+ /* For the first buffer object in the array, we get an
+ * accurate count back for its reloc_tree size (since nothing
+ * had been flagged as being counted yet). We can save that
+ * value out as a more conservative reloc_tree_size that
+ * avoids double-counting target buffers. Since the first
+ * buffer happens to usually be the batch buffer in our
+ * callers, this can pull us back from doing the tree
+ * walk on every new batch emit.
+ */
+ if (i == 0) {
+ drm_intel_bo_gem *bo_gem =
+ (drm_intel_bo_gem *) bo_array[i];
+ bo_gem->reloc_tree_size = total;
+ }
+ }
+
+ for (i = 0; i < count; i++)
+ drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
+ return total;
+}
+
+/**
+ * Return -1 if the batchbuffer should be flushed before attempting to
+ * emit rendering referencing the buffers pointed to by bo_array.
+ *
+ * This is required because if we try to emit a batchbuffer with relocations
+ * to a tree of buffers that won't simultaneously fit in the aperture,
+ * the rendering will return an error at a point where the software is not
+ * prepared to recover from it.
+ *
+ * However, we also want to emit the batchbuffer significantly before we reach
+ * the limit, as a series of batchbuffers each of which references buffers
+ * covering almost all of the aperture means that at each emit we end up
+ * waiting to evict a buffer from the last rendering, and we get synchronous
+ * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
+ * get better parallelism.
+ */
+static int
+drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem =
+ (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
+ unsigned int total = 0;
+ unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
+ int total_fences;
+
+ /* Check for fence reg constraints if necessary */
+ if (bufmgr_gem->available_fences) {
+ total_fences = drm_intel_gem_total_fences(bo_array, count);
+ if (total_fences > bufmgr_gem->available_fences)
+ return -ENOSPC;
+ }
+
+ total = drm_intel_gem_estimate_batch_space(bo_array, count);
+
+ if (total > threshold)
+ total = drm_intel_gem_compute_batch_space(bo_array, count);
+
+ if (total > threshold) {
+ DBG("check_space: overflowed available aperture, "
+ "%dkb vs %dkb\n",
+ total / 1024, (int)bufmgr_gem->gtt_size / 1024);
+ return -ENOSPC;
+ } else {
+		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
+ (int)bufmgr_gem->gtt_size / 1024);
+ return 0;
+ }
+}
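+
+/*
+ * Editorial sketch (illustrative): typical caller-side use, flushing
+ * when the proposed working set would not fit. "bos"/"n" are the batch
+ * plus every buffer the next state emission would reference;
+ * flush_batch() is a hypothetical helper.
+ *
+ *	if (drm_intel_bufmgr_check_aperture_space(bos, n) != 0) {
+ *		flush_batch();
+ *		... re-emit against the new batch ...
+ *	}
+ */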
+
+/*
+ * Disable buffer reuse for objects which are shared with the kernel
+ * as scanout buffers.
+ */
+static int
+drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ bo_gem->reusable = false;
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ return bo_gem->reusable;
+}
+
+static int
+_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ if (bo_gem->reloc_target_info[i].bo == target_bo)
+ return 1;
+ if (bo == bo_gem->reloc_target_info[i].bo)
+ continue;
+ if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
+ target_bo))
+ return 1;
+ }
+
+	for (i = 0; i < bo_gem->softpin_target_count; i++) {
+ if (bo_gem->softpin_target[i] == target_bo)
+ return 1;
+ if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
+ return 1;
+ }
+
+ return 0;
+}
+
+/** Return true if target_bo is referenced by bo's relocation tree. */
+static int
+drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+{
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
+
+ if (bo == NULL || target_bo == NULL)
+ return 0;
+ if (target_bo_gem->used_as_reloc_target)
+ return _drm_intel_gem_bo_references(bo, target_bo);
+ return 0;
+}
+
+static void
+add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
+{
+ unsigned int i = bufmgr_gem->num_buckets;
+
+ assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
+
+ DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
+ bufmgr_gem->cache_bucket[i].size = size;
+ bufmgr_gem->num_buckets++;
+}
+
+static void
+init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ unsigned long size, cache_max_size = 64 * 1024 * 1024;
+
+ /* OK, so power of two buckets was too wasteful of memory.
+ * Give 3 other sizes between each power of two, to hopefully
+ * cover things accurately enough. (The alternative is
+ * probably to just go for exact matching of sizes, and assume
+ * that for things like composited window resize the tiled
+ * width/height alignment and rounding of sizes to pages will
+ * get us useful cache hit rates anyway)
+ */
+ add_bucket(bufmgr_gem, 4096);
+ add_bucket(bufmgr_gem, 4096 * 2);
+ add_bucket(bufmgr_gem, 4096 * 3);
+
+ /* Initialize the linked lists for BO reuse cache. */
+ for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
+ add_bucket(bufmgr_gem, size);
+
+ add_bucket(bufmgr_gem, size + size * 1 / 4);
+ add_bucket(bufmgr_gem, size + size * 2 / 4);
+ add_bucket(bufmgr_gem, size + size * 3 / 4);
+ }
+}
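+
+/*
+ * Editorial note: the resulting bucket sizes are 4KiB, 8KiB and 12KiB,
+ * then for each power of two p starting at 16KiB the four sizes p,
+ * p + p/4, p + p/2 and p + 3p/4 (16, 20, 24, 28, 32, 40, 48, 56,
+ * 64KiB, ...); the final 64MiB iteration also creates the 80, 96 and
+ * 112MiB buckets.
+ */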
+
+void
+drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ bufmgr_gem->vma_max = limit;
+
+ drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
+/**
+ * Get the PCI ID for the device. This can be overridden by setting the
+ * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
+ */
+static int
+get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ char *devid_override;
+ int devid = 0;
+ int ret;
+ drm_i915_getparam_t gp;
+
+ if (geteuid() == getuid()) {
+ devid_override = getenv("INTEL_DEVID_OVERRIDE");
+ if (devid_override) {
+ bufmgr_gem->no_exec = true;
+ return strtod(devid_override, NULL);
+ }
+ }
+
+ memclear(gp);
+ gp.param = I915_PARAM_CHIPSET_ID;
+ gp.value = &devid;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret) {
+ fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
+ fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
+ }
+ return devid;
+}
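+
+/*
+ * Editorial note: the override is intended for tooling, e.g.
+ *
+ *	INTEL_DEVID_OVERRIDE=0x0416 ./some_tool
+ *
+ * (0x0416 is PCI_CHIP_HASWELL_M_GT2). Using it also sets no_exec, so
+ * batches are built and validated but never actually submitted.
+ */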
+
+int
+drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ return bufmgr_gem->pci_device;
+}
+
+/**
+ * Sets the AUB filename.
+ *
+ * Deprecated: AUB dumping was removed from libdrm, so this function is
+ * now a no-op kept only for ABI compatibility (see
+ * drm_intel_bufmgr_gem_set_aub_dump() below).
+ */
+void
+drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
+ const char *filename)
+{
+}
+
+/**
+ * Sets up AUB dumping.
+ *
+ * AUB is a trace file format that can be used with the simulator:
+ * packets are emitted in a format somewhat like GPU command packets,
+ * you can set up a GTT and upload your objects into the referenced
+ * space, then send off batchbuffers and get BMPs out the other end.
+ * Support for it was removed from libdrm, however, so this function
+ * now only prints the deprecation notice below.
+ */
+void
+drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
+{
+ fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
+ "Use intel_aubdump from intel-gpu-tools instead. Install intel-gpu-tools,\n"
+ "then run (for example)\n\n"
+ "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
+ "See the intel_aubdump man page for more details.\n");
+}
+
+drm_intel_context *
+drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+ struct drm_i915_gem_context_create create;
+ drm_intel_context *context = NULL;
+ int ret;
+
+ context = calloc(1, sizeof(*context));
+ if (!context)
+ return NULL;
+
+ memclear(create);
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
+ if (ret != 0) {
+ DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
+ strerror(errno));
+ free(context);
+ return NULL;
+ }
+
+ context->ctx_id = create.ctx_id;
+ context->bufmgr = bufmgr;
+
+ return context;
+}
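+
+/*
+ * Editorial sketch (illustrative): per-context submission with the
+ * helpers above. "batch" and "used" come from the caller; error
+ * handling elided.
+ *
+ *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
+ *
+ *	drm_intel_gem_bo_context_exec(batch, ctx, used, I915_EXEC_RENDER);
+ *	drm_intel_gem_context_destroy(ctx);
+ */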
+
+void
+drm_intel_gem_context_destroy(drm_intel_context *ctx)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ struct drm_i915_gem_context_destroy destroy;
+ int ret;
+
+ if (ctx == NULL)
+ return;
+
+ memclear(destroy);
+
+ bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
+ destroy.ctx_id = ctx->ctx_id;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
+ &destroy);
+ if (ret != 0)
+ fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
+ strerror(errno));
+
+ free(ctx);
+}
+
+int
+drm_intel_get_reset_stats(drm_intel_context *ctx,
+ uint32_t *reset_count,
+ uint32_t *active,
+ uint32_t *pending)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ struct drm_i915_reset_stats stats;
+ int ret;
+
+ if (ctx == NULL)
+ return -EINVAL;
+
+ memclear(stats);
+
+ bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
+ stats.ctx_id = ctx->ctx_id;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GET_RESET_STATS,
+ &stats);
+ if (ret == 0) {
+ if (reset_count != NULL)
+ *reset_count = stats.reset_count;
+
+ if (active != NULL)
+ *active = stats.batch_active;
+
+ if (pending != NULL)
+ *pending = stats.batch_pending;
+ }
+
+ return ret;
+}
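+
+/*
+ * Editorial sketch (illustrative): checking whether a context has been
+ * affected by a GPU hang since the last query. "last_resets" is
+ * caller-maintained state.
+ *
+ *	uint32_t resets, active, pending;
+ *
+ *	if (drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
+ *	    resets != last_resets)
+ *		... re-create lost GPU state ...
+ */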
+
+int
+drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
+ uint32_t offset,
+ uint64_t *result)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+ struct drm_i915_reg_read reg_read;
+ int ret;
+
+ memclear(reg_read);
+ reg_read.offset = offset;
+
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
+
+ *result = reg_read.val;
+ return ret;
+}
+
+int
+drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
+{
+ drm_i915_getparam_t gp;
+ int ret;
+
+ memclear(gp);
+ gp.value = (int*)subslice_total;
+ gp.param = I915_PARAM_SUBSLICE_TOTAL;
+ ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret)
+ return -errno;
+
+ return 0;
+}
+
+int
+drm_intel_get_eu_total(int fd, unsigned int *eu_total)
+{
+ drm_i915_getparam_t gp;
+ int ret;
+
+ memclear(gp);
+ gp.value = (int*)eu_total;
+ gp.param = I915_PARAM_EU_TOTAL;
+ ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret)
+ return -errno;
+
+ return 0;
+}
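+
+/*
+ * Editorial note: both queries take a raw DRM fd rather than a bufmgr,
+ * so they can be issued before drm_intel_bufmgr_gem_init(), e.g.:
+ *
+ *	unsigned int subslices, eus;
+ *
+ *	if (drm_intel_get_subslice_total(fd, &subslices) == 0 &&
+ *	    drm_intel_get_eu_total(fd, &eus) == 0)
+ *		printf("%u subslices, %u EUs\n", subslices, eus);
+ */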
+
+/**
+ * Annotate the given bo for use in aub dumping.
+ *
+ * \param annotations is an array of drm_intel_aub_annotation objects
+ * describing the type of data in various sections of the bo. Each
+ * element of the array specifies the type and subtype of a section of
+ * the bo, and the past-the-end offset of that section. The elements
+ * of \c annotations must be sorted so that ending_offset is
+ * increasing.
+ *
+ * \param count is the number of elements in the \c annotations array.
+ * If \c count is zero, then \c annotations will not be dereferenced.
+ *
+ * Annotations are copied into a private data structure, so caller may
+ * re-use the memory pointed to by \c annotations after the call
+ * returns.
+ *
+ * Annotations are stored for the lifetime of the bo; to reset to the
+ * default state (no annotations), call this function with a \c count
+ * of zero.
+ *
+ * Deprecated: with AUB dumping removed from libdrm, this function is
+ * now a no-op retained only for ABI compatibility.
+ */
+void
+drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
+ drm_intel_aub_annotation *annotations,
+ unsigned count)
+{
+}
+
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
+
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find(int fd)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+
+ DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+ if (bufmgr_gem->fd == fd) {
+ atomic_inc(&bufmgr_gem->refcount);
+ return bufmgr_gem;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
+ pthread_mutex_lock(&bufmgr_list_mutex);
+
+ if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+ DRMLISTDEL(&bufmgr_gem->managers);
+ drm_intel_bufmgr_gem_destroy(bufmgr);
+ }
+
+ pthread_mutex_unlock(&bufmgr_list_mutex);
+ }
+}
+
+/**
+ * Initializes the GEM buffer manager, which uses the kernel to
+ * allocate, map, and manage buffer objects.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ */
+drm_intel_bufmgr *
+drm_intel_bufmgr_gem_init(int fd, int batch_size)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ struct drm_i915_gem_get_aperture aperture;
+ drm_i915_getparam_t gp;
+ int ret, tmp;
+ bool exec2 = false;
+
+ pthread_mutex_lock(&bufmgr_list_mutex);
+
+ bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
+ if (bufmgr_gem)
+ goto exit;
+
+ bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+ if (bufmgr_gem == NULL)
+ goto exit;
+
+ bufmgr_gem->fd = fd;
+ atomic_set(&bufmgr_gem->refcount, 1);
+
+ if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
+ free(bufmgr_gem);
+ bufmgr_gem = NULL;
+ goto exit;
+ }
+
+ memclear(aperture);
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_GET_APERTURE,
+ &aperture);
+
+ if (ret == 0)
+ bufmgr_gem->gtt_size = aperture.aper_available_size;
+ else {
+		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
+ strerror(errno));
+ bufmgr_gem->gtt_size = 128 * 1024 * 1024;
+ fprintf(stderr, "Assuming %dkB available aperture size.\n"
+ "May lead to reduced performance or incorrect "
+ "rendering.\n",
+ (int)bufmgr_gem->gtt_size / 1024);
+ }
+
+ bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
+
+ if (IS_GEN2(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 2;
+ else if (IS_GEN3(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 3;
+ else if (IS_GEN4(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 4;
+ else if (IS_GEN5(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 5;
+ else if (IS_GEN6(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 6;
+ else if (IS_GEN7(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 7;
+ else if (IS_GEN8(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 8;
+ else if (IS_GEN9(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 9;
+ else {
+ free(bufmgr_gem);
+ bufmgr_gem = NULL;
+ goto exit;
+ }
+
+ if (IS_GEN3(bufmgr_gem->pci_device) &&
+ bufmgr_gem->gtt_size > 256*1024*1024) {
+ /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
+ * be used for tiled blits. To simplify the accounting, just
+ * subtract the unmappable part (fixed to 256MB on all known
+ * gen3 devices) if the kernel advertises it. */
+ bufmgr_gem->gtt_size -= 256*1024*1024;
+ }
+
+ memclear(gp);
+ gp.value = &tmp;
+
+ gp.param = I915_PARAM_HAS_EXECBUF2;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (!ret)
+ exec2 = true;
+
+ gp.param = I915_PARAM_HAS_BSD;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ bufmgr_gem->has_bsd = ret == 0;
+
+ gp.param = I915_PARAM_HAS_BLT;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ bufmgr_gem->has_blt = ret == 0;
+
+ gp.param = I915_PARAM_HAS_RELAXED_FENCING;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ bufmgr_gem->has_relaxed_fencing = ret == 0;
+
+ bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
+
+ gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ bufmgr_gem->has_wait_timeout = ret == 0;
+
+ gp.param = I915_PARAM_HAS_LLC;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret != 0) {
+		/* Kernel does not support the HAS_LLC query; fall back to
+		 * GPU generation detection and assume that we have LLC on
+		 * GEN6/7.
+		 */
+ bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
+ IS_GEN7(bufmgr_gem->pci_device));
+ } else
+ bufmgr_gem->has_llc = *gp.value;
+
+ gp.param = I915_PARAM_HAS_VEBOX;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
+
+ gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret == 0 && *gp.value > 0)
+ bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
+
+ if (bufmgr_gem->gen < 4) {
+ gp.param = I915_PARAM_NUM_FENCES_AVAIL;
+ gp.value = &bufmgr_gem->available_fences;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret) {
+ fprintf(stderr, "get fences failed: %d [%d]\n", ret,
+ errno);
+ fprintf(stderr, "param: %d, val: %d\n", gp.param,
+ *gp.value);
+ bufmgr_gem->available_fences = 0;
+ } else {
+ /* XXX The kernel reports the total number of fences,
+ * including any that may be pinned.
+ *
+ * We presume that there will be at least one pinned
+ * fence for the scanout buffer, but there may be more
+ * than one scanout and the user may be manually
+ * pinning buffers. Let's move to execbuffer2 and
+ * thereby forget the insanity of using fences...
+ */
+ bufmgr_gem->available_fences -= 2;
+ if (bufmgr_gem->available_fences < 0)
+ bufmgr_gem->available_fences = 0;
+ }
+ }
+
+ if (bufmgr_gem->gen >= 8) {
+ gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret == 0 && *gp.value == 3)
+ bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
+ }
+
+ /* Let's go with one relocation per every 2 dwords (but round down a bit
+ * since a power of two will mean an extra page allocation for the reloc
+ * buffer).
+ *
+ * Every 4 was too few for the blender benchmark.
+ */
+ bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
+
+ bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
+ bufmgr_gem->bufmgr.bo_alloc_for_render =
+ drm_intel_gem_bo_alloc_for_render;
+ bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
+ bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
+ bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
+ bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
+ bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
+ bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
+ bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
+ bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
+ bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
+ bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
+ bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
+ bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
+ bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
+ bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
+ bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
+ /* Use the new one if available */
+ if (exec2) {
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
+ bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
+ } else
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+ bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
+ bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
+ bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
+ bufmgr_gem->bufmgr.debug = 0;
+ bufmgr_gem->bufmgr.check_aperture_space =
+ drm_intel_gem_check_aperture_space;
+ bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
+ bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
+ bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
+ drm_intel_gem_get_pipe_from_crtc_id;
+ bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
+
+ DRMINITLISTHEAD(&bufmgr_gem->named);
+ init_cache_buckets(bufmgr_gem);
+
+ DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
+ bufmgr_gem->vma_max = -1; /* unlimited by default */
+
+ DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+exit:
+ pthread_mutex_unlock(&bufmgr_list_mutex);
+
+ return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
+}
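+
+/*
+ * Editorial sketch (illustrative): minimal bring-up of this buffer
+ * manager. The device path is made up; real users normally get the fd
+ * from their windowing system. Error handling elided.
+ *
+ *	int fd = open("/dev/dri/card0", O_RDWR);
+ *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ *
+ *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ *	... allocate, map and execute bos ...
+ *	drm_intel_bufmgr_destroy(bufmgr);
+ */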
diff --git a/chromium/third_party/libdrm/src/intel/intel_bufmgr_priv.h b/chromium/third_party/libdrm/src/intel/intel_bufmgr_priv.h
new file mode 100644
index 00000000000..7e360a0b23d
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel_bufmgr_priv.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/**
+ * @file intel_bufmgr_priv.h
+ *
+ * Private definitions of Intel-specific bufmgr functions and structures.
+ */
+
+#ifndef INTEL_BUFMGR_PRIV_H
+#define INTEL_BUFMGR_PRIV_H
+
+/**
+ * Context for a buffer manager instance.
+ *
+ * Contains public methods followed by private storage for the buffer manager.
+ */
+struct _drm_intel_bufmgr {
+ /**
+ * Allocate a buffer object.
+ *
+ * Buffer objects are not necessarily initially mapped into CPU virtual
+ * address space or graphics device aperture. They must be mapped
+ * using bo_map() or drm_intel_gem_bo_map_gtt() to be used by the CPU.
+ */
+ drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+
+ /**
+ * Allocate a buffer object, hinting that it will be used as a
+ * render target.
+ *
+ * This is otherwise the same as bo_alloc.
+ */
+ drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment);
+
+ /**
+ * Allocate a buffer object from an existing user accessible
+ * address malloc'd with the provided size.
+ * Alignment is used when mapping to the gtt.
+ * Flags may be I915_VMAP_READ_ONLY or I915_USERPTR_UNSYNCHRONIZED
+ */
+ drm_intel_bo *(*bo_alloc_userptr)(drm_intel_bufmgr *bufmgr,
+ const char *name, void *addr,
+ uint32_t tiling_mode, uint32_t stride,
+ unsigned long size,
+ unsigned long flags);
+
+ /**
+ * Allocate a tiled buffer object.
+ *
+ * Alignment for tiled objects is set automatically; the 'flags'
+ * argument provides a hint about how the object will be used initially.
+ *
+ * Valid tiling formats are:
+ * I915_TILING_NONE
+ * I915_TILING_X
+ * I915_TILING_Y
+ *
+ * Note the tiling format may be rejected; callers should check the
+ * 'tiling_mode' field on return, as well as the pitch value, which
+	 * may have been rounded up to accommodate tiling restrictions.
+ */
+ drm_intel_bo *(*bo_alloc_tiled) (drm_intel_bufmgr *bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags);
+
+ /** Takes a reference on a buffer object */
+ void (*bo_reference) (drm_intel_bo *bo);
+
+ /**
+ * Releases a reference on a buffer object, freeing the data if
+ * no references remain.
+ */
+ void (*bo_unreference) (drm_intel_bo *bo);
+
+ /**
+ * Maps the buffer into userspace.
+ *
+ * This function will block waiting for any existing execution on the
+ * buffer to complete, first. The resulting mapping is available at
+ * buf->virtual.
+ */
+ int (*bo_map) (drm_intel_bo *bo, int write_enable);
+
+ /**
+ * Reduces the refcount on the userspace mapping of the buffer
+ * object.
+ */
+ int (*bo_unmap) (drm_intel_bo *bo);
+
+ /**
+ * Write data into an object.
+ *
+	 * This is an optional function; if missing,
+ * drm_intel_bo will map/memcpy/unmap.
+ */
+ int (*bo_subdata) (drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+
+ /**
+	 * Read data from an object.
+	 *
+	 * This is an optional function; if missing,
+ * drm_intel_bo will map/memcpy/unmap.
+ */
+ int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+
+ /**
+ * Waits for rendering to an object by the GPU to have completed.
+ *
+ * This is not required for any access to the BO by bo_map,
+ * bo_subdata, etc. It is merely a way for the driver to implement
+ * glFinish.
+ */
+ void (*bo_wait_rendering) (drm_intel_bo *bo);
+
+ /**
+ * Tears down the buffer manager instance.
+ */
+ void (*destroy) (drm_intel_bufmgr *bufmgr);
+
+ /**
+ * Indicate if the buffer can be placed anywhere in the full ppgtt
+ * address range (2^48).
+ *
+	 * Any resource used with the flat/heapless (0x00000000-0xfffff000)
+	 * General State Heap (GSH) or Instruction State Heap (ISH) must
+	 * be in a 32-bit range. The 48-bit range will only be used when
+	 * explicitly requested.
+	 *
+	 * \param bo Buffer to set the use_48b_address_range flag for.
+ * \param enable The flag value.
+ */
+ void (*bo_use_48b_address_range) (drm_intel_bo *bo, uint32_t enable);
+
+ /**
+ * Add relocation entry in reloc_buf, which will be updated with the
+	 * target buffer's real offset on command submission.
+ *
+ * Relocations remain in place for the lifetime of the buffer object.
+ *
+ * \param bo Buffer to write the relocation into.
+ * \param offset Byte offset within reloc_bo of the pointer to
+ * target_bo.
+ * \param target_bo Buffer whose offset should be written into the
+ * relocation entry.
+ * \param target_offset Constant value to be added to target_bo's
+ * offset in relocation entry.
+ * \param read_domains GEM read domains which the buffer will be
+ * read into by the command that this relocation
+ * is part of.
+	 * \param write_domain GEM write domain which the buffer will be
+	 *			dirtied in by the command that this
+	 *			relocation is part of.
+ */
+ int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+ int (*bo_emit_reloc_fence)(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo,
+ uint32_t target_offset,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+ /** Executes the command buffer pointed to by bo. */
+ int (*bo_exec) (drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4);
+
+ /** Executes the command buffer pointed to by bo on the selected
+ * ring buffer
+ */
+ int (*bo_mrb_exec) (drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4, unsigned flags);
+
+ /**
+ * Pin a buffer to the aperture and fix the offset until unpinned
+ *
+ * \param buf Buffer to pin
+ * \param alignment Required alignment for aperture, in bytes
+ */
+ int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment);
+
+ /**
+ * Unpin a buffer from the aperture, allowing it to be removed
+ *
+ * \param buf Buffer to unpin
+ */
+ int (*bo_unpin) (drm_intel_bo *bo);
+
+ /**
+ * Ask that the buffer be placed in tiling mode
+ *
+ * \param buf Buffer to set tiling mode for
+ * \param tiling_mode desired, and returned tiling mode
+ */
+ int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride);
+
+ /**
+ * Get the current tiling (and resulting swizzling) mode for the bo.
+ *
+ * \param buf Buffer to get tiling mode for
+ * \param tiling_mode returned tiling mode
+ * \param swizzle_mode returned swizzling mode
+ */
+ int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+
+ /**
+ * Set the offset at which this buffer will be softpinned
+ * \param bo Buffer to set the softpin offset for
+ * \param offset Softpin offset
+ */
+ int (*bo_set_softpin_offset) (drm_intel_bo *bo, uint64_t offset);
+
+ /**
+ * Create a visible name for a buffer which can be used by other apps
+ *
+ * \param buf Buffer to create a name for
+ * \param name Returned name
+ */
+ int (*bo_flink) (drm_intel_bo *bo, uint32_t * name);
+
+ /**
+ * Returns 1 if mapping the buffer for write could cause the process
+ * to block, due to the object being active in the GPU.
+ */
+ int (*bo_busy) (drm_intel_bo *bo);
+
+ /**
+ * Specify the volatility of the buffer.
+	 * \param bo Buffer to set the advice for
+ * \param madv The purgeable status
+ *
+ * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
+ * reclaimed under memory pressure. If you subsequently require the buffer,
+ * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
+ *
+ * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
+ * marked as I915_MADV_DONTNEED.
+ */
+ int (*bo_madvise) (drm_intel_bo *bo, int madv);
+
+ int (*check_aperture_space) (drm_intel_bo ** bo_array, int count);
+
+ /**
+ * Disable buffer reuse for buffers which will be shared in some way,
+ * as with scanout buffers. When the buffer reference count goes to
+ * zero, it will be freed and not placed in the reuse list.
+ *
+ * \param bo Buffer to disable reuse for
+ */
+ int (*bo_disable_reuse) (drm_intel_bo *bo);
+
+ /**
+ * Query whether a buffer is reusable.
+ *
+ * \param bo Buffer to query
+ */
+ int (*bo_is_reusable) (drm_intel_bo *bo);
+
+	/**
+	 * Return the pipe associated with a crtc_id so that vblank
+	 * synchronization can use the correct data in the request.
+	 * This is only supported for KMS and GEM at this point; when
+	 * unsupported, this function returns -1 and leaves the decision
+	 * of what to do in that case to the caller.
+ *
+ * \param bufmgr the associated buffer manager
+ * \param crtc_id the crtc identifier
+ */
+ int (*get_pipe_from_crtc_id) (drm_intel_bufmgr *bufmgr, int crtc_id);
+
+ /** Returns true if target_bo is in the relocation tree rooted at bo. */
+ int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo);
+
+ /**< Enables verbose debugging printouts */
+ int debug;
+};
+
+struct _drm_intel_context {
+ unsigned int ctx_id;
+ struct _drm_intel_bufmgr *bufmgr;
+};
+
+#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))
+#define ROUND_UP_TO(x, y) (((x) + (y) - 1) / (y) * (y))
+#define ROUND_UP_TO_MB(x) ROUND_UP_TO((x), 1024*1024)
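+
+/*
+ * Editorial note: with the fully parenthesized ALIGN above,
+ * ALIGN(4097, 4096) == 8192 and ROUND_UP_TO_MB(1) == 1048576.
+ * ALIGN assumes a power-of-two alignment; ROUND_UP_TO does not.
+ */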
+
+#endif /* INTEL_BUFMGR_PRIV_H */
diff --git a/chromium/third_party/libdrm/src/intel/intel_chipset.h b/chromium/third_party/libdrm/src/intel/intel_chipset.h
new file mode 100644
index 00000000000..514f6594a15
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel_chipset.h
@@ -0,0 +1,462 @@
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_CHIPSET_H
+#define _INTEL_CHIPSET_H
+
+#define PCI_CHIP_I810 0x7121
+#define PCI_CHIP_I810_DC100 0x7123
+#define PCI_CHIP_I810_E 0x7125
+#define PCI_CHIP_I815 0x1132
+
+#define PCI_CHIP_I830_M 0x3577
+#define PCI_CHIP_845_G 0x2562
+#define PCI_CHIP_I855_GM 0x3582
+#define PCI_CHIP_I865_G 0x2572
+
+#define PCI_CHIP_I915_G 0x2582
+#define PCI_CHIP_E7221_G 0x258A
+#define PCI_CHIP_I915_GM 0x2592
+#define PCI_CHIP_I945_G 0x2772
+#define PCI_CHIP_I945_GM 0x27A2
+#define PCI_CHIP_I945_GME 0x27AE
+
+#define PCI_CHIP_Q35_G 0x29B2
+#define PCI_CHIP_G33_G 0x29C2
+#define PCI_CHIP_Q33_G 0x29D2
+
+#define PCI_CHIP_IGD_GM 0xA011
+#define PCI_CHIP_IGD_G 0xA001
+
+#define IS_IGDGM(devid) ((devid) == PCI_CHIP_IGD_GM)
+#define IS_IGDG(devid) ((devid) == PCI_CHIP_IGD_G)
+#define IS_IGD(devid) (IS_IGDG(devid) || IS_IGDGM(devid))
+
+#define PCI_CHIP_I965_G 0x29A2
+#define PCI_CHIP_I965_Q 0x2992
+#define PCI_CHIP_I965_G_1 0x2982
+#define PCI_CHIP_I946_GZ 0x2972
+#define PCI_CHIP_I965_GM 0x2A02
+#define PCI_CHIP_I965_GME 0x2A12
+
+#define PCI_CHIP_GM45_GM 0x2A42
+
+#define PCI_CHIP_IGD_E_G 0x2E02
+#define PCI_CHIP_Q45_G 0x2E12
+#define PCI_CHIP_G45_G 0x2E22
+#define PCI_CHIP_G41_G 0x2E32
+
+#define PCI_CHIP_ILD_G 0x0042
+#define PCI_CHIP_ILM_G 0x0046
+
+#define PCI_CHIP_SANDYBRIDGE_GT1 0x0102 /* desktop */
+#define PCI_CHIP_SANDYBRIDGE_GT2 0x0112
+#define PCI_CHIP_SANDYBRIDGE_GT2_PLUS 0x0122
+#define PCI_CHIP_SANDYBRIDGE_M_GT1 0x0106 /* mobile */
+#define PCI_CHIP_SANDYBRIDGE_M_GT2 0x0116
+#define PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS 0x0126
+#define PCI_CHIP_SANDYBRIDGE_S 0x010A /* server */
+
+#define PCI_CHIP_IVYBRIDGE_GT1 0x0152 /* desktop */
+#define PCI_CHIP_IVYBRIDGE_GT2 0x0162
+#define PCI_CHIP_IVYBRIDGE_M_GT1 0x0156 /* mobile */
+#define PCI_CHIP_IVYBRIDGE_M_GT2 0x0166
+#define PCI_CHIP_IVYBRIDGE_S 0x015a /* server */
+#define PCI_CHIP_IVYBRIDGE_S_GT2 0x016a /* server */
+
+#define PCI_CHIP_HASWELL_GT1 0x0402 /* Desktop */
+#define PCI_CHIP_HASWELL_GT2 0x0412
+#define PCI_CHIP_HASWELL_GT3 0x0422
+#define PCI_CHIP_HASWELL_M_GT1 0x0406 /* Mobile */
+#define PCI_CHIP_HASWELL_M_GT2 0x0416
+#define PCI_CHIP_HASWELL_M_GT3 0x0426
+#define PCI_CHIP_HASWELL_S_GT1 0x040A /* Server */
+#define PCI_CHIP_HASWELL_S_GT2 0x041A
+#define PCI_CHIP_HASWELL_S_GT3 0x042A
+#define PCI_CHIP_HASWELL_B_GT1 0x040B /* Reserved */
+#define PCI_CHIP_HASWELL_B_GT2 0x041B
+#define PCI_CHIP_HASWELL_B_GT3 0x042B
+#define PCI_CHIP_HASWELL_E_GT1 0x040E /* Reserved */
+#define PCI_CHIP_HASWELL_E_GT2 0x041E
+#define PCI_CHIP_HASWELL_E_GT3 0x042E
+#define PCI_CHIP_HASWELL_SDV_GT1 0x0C02 /* Desktop */
+#define PCI_CHIP_HASWELL_SDV_GT2 0x0C12
+#define PCI_CHIP_HASWELL_SDV_GT3 0x0C22
+#define PCI_CHIP_HASWELL_SDV_M_GT1 0x0C06 /* Mobile */
+#define PCI_CHIP_HASWELL_SDV_M_GT2 0x0C16
+#define PCI_CHIP_HASWELL_SDV_M_GT3 0x0C26
+#define PCI_CHIP_HASWELL_SDV_S_GT1 0x0C0A /* Server */
+#define PCI_CHIP_HASWELL_SDV_S_GT2 0x0C1A
+#define PCI_CHIP_HASWELL_SDV_S_GT3 0x0C2A
+#define PCI_CHIP_HASWELL_SDV_B_GT1 0x0C0B /* Reserved */
+#define PCI_CHIP_HASWELL_SDV_B_GT2 0x0C1B
+#define PCI_CHIP_HASWELL_SDV_B_GT3 0x0C2B
+#define PCI_CHIP_HASWELL_SDV_E_GT1 0x0C0E /* Reserved */
+#define PCI_CHIP_HASWELL_SDV_E_GT2 0x0C1E
+#define PCI_CHIP_HASWELL_SDV_E_GT3 0x0C2E
+#define PCI_CHIP_HASWELL_ULT_GT1 0x0A02 /* Desktop */
+#define PCI_CHIP_HASWELL_ULT_GT2 0x0A12
+#define PCI_CHIP_HASWELL_ULT_GT3 0x0A22
+#define PCI_CHIP_HASWELL_ULT_M_GT1 0x0A06 /* Mobile */
+#define PCI_CHIP_HASWELL_ULT_M_GT2 0x0A16
+#define PCI_CHIP_HASWELL_ULT_M_GT3 0x0A26
+#define PCI_CHIP_HASWELL_ULT_S_GT1 0x0A0A /* Server */
+#define PCI_CHIP_HASWELL_ULT_S_GT2 0x0A1A
+#define PCI_CHIP_HASWELL_ULT_S_GT3 0x0A2A
+#define PCI_CHIP_HASWELL_ULT_B_GT1 0x0A0B /* Reserved */
+#define PCI_CHIP_HASWELL_ULT_B_GT2 0x0A1B
+#define PCI_CHIP_HASWELL_ULT_B_GT3 0x0A2B
+#define PCI_CHIP_HASWELL_ULT_E_GT1 0x0A0E /* Reserved */
+#define PCI_CHIP_HASWELL_ULT_E_GT2 0x0A1E
+#define PCI_CHIP_HASWELL_ULT_E_GT3 0x0A2E
+#define PCI_CHIP_HASWELL_CRW_GT1 0x0D02 /* Desktop */
+#define PCI_CHIP_HASWELL_CRW_GT2 0x0D12
+#define PCI_CHIP_HASWELL_CRW_GT3 0x0D22
+#define PCI_CHIP_HASWELL_CRW_M_GT1 0x0D06 /* Mobile */
+#define PCI_CHIP_HASWELL_CRW_M_GT2 0x0D16
+#define PCI_CHIP_HASWELL_CRW_M_GT3 0x0D26
+#define PCI_CHIP_HASWELL_CRW_S_GT1 0x0D0A /* Server */
+#define PCI_CHIP_HASWELL_CRW_S_GT2 0x0D1A
+#define PCI_CHIP_HASWELL_CRW_S_GT3 0x0D2A
+#define PCI_CHIP_HASWELL_CRW_B_GT1 0x0D0B /* Reserved */
+#define PCI_CHIP_HASWELL_CRW_B_GT2 0x0D1B
+#define PCI_CHIP_HASWELL_CRW_B_GT3 0x0D2B
+#define PCI_CHIP_HASWELL_CRW_E_GT1 0x0D0E /* Reserved */
+#define PCI_CHIP_HASWELL_CRW_E_GT2 0x0D1E
+#define PCI_CHIP_HASWELL_CRW_E_GT3 0x0D2E
+#define BDW_SPARE 0x2
+#define BDW_ULT 0x6
+#define BDW_SERVER 0xa
+#define BDW_IRIS 0xb
+#define BDW_WORKSTATION 0xd
+#define BDW_ULX 0xe
+
+#define PCI_CHIP_VALLEYVIEW_PO 0x0f30 /* VLV PO board */
+#define PCI_CHIP_VALLEYVIEW_1 0x0f31
+#define PCI_CHIP_VALLEYVIEW_2 0x0f32
+#define PCI_CHIP_VALLEYVIEW_3 0x0f33
+
+#define PCI_CHIP_CHERRYVIEW_0 0x22b0
+#define PCI_CHIP_CHERRYVIEW_1 0x22b1
+#define PCI_CHIP_CHERRYVIEW_2 0x22b2
+#define PCI_CHIP_CHERRYVIEW_3 0x22b3
+
+#define PCI_CHIP_SKYLAKE_DT_GT1 0x1902
+#define PCI_CHIP_SKYLAKE_ULT_GT1 0x1906
+#define PCI_CHIP_SKYLAKE_SRV_GT1 0x190A /* Reserved */
+#define PCI_CHIP_SKYLAKE_H_GT1 0x190B
+#define PCI_CHIP_SKYLAKE_ULX_GT1 0x190E /* Reserved */
+#define PCI_CHIP_SKYLAKE_DT_GT2 0x1912
+#define PCI_CHIP_SKYLAKE_FUSED0_GT2 0x1913 /* Reserved */
+#define PCI_CHIP_SKYLAKE_FUSED1_GT2 0x1915 /* Reserved */
+#define PCI_CHIP_SKYLAKE_ULT_GT2 0x1916
+#define PCI_CHIP_SKYLAKE_FUSED2_GT2 0x1917 /* Reserved */
+#define PCI_CHIP_SKYLAKE_SRV_GT2 0x191A /* Reserved */
+#define PCI_CHIP_SKYLAKE_HALO_GT2 0x191B
+#define PCI_CHIP_SKYLAKE_WKS_GT2 0x191D
+#define PCI_CHIP_SKYLAKE_ULX_GT2 0x191E
+#define PCI_CHIP_SKYLAKE_MOBILE_GT2 0x1921 /* Reserved */
+#define PCI_CHIP_SKYLAKE_ULT_GT3_0 0x1923
+#define PCI_CHIP_SKYLAKE_ULT_GT3_1 0x1926
+#define PCI_CHIP_SKYLAKE_ULT_GT3_2 0x1927
+#define PCI_CHIP_SKYLAKE_SRV_GT4 0x192A
+#define PCI_CHIP_SKYLAKE_HALO_GT3 0x192B /* Reserved */
+#define PCI_CHIP_SKYLAKE_SRV_GT3 0x192D
+#define PCI_CHIP_SKYLAKE_DT_GT4 0x1932
+#define PCI_CHIP_SKYLAKE_SRV_GT4X 0x193A
+#define PCI_CHIP_SKYLAKE_H_GT4 0x193B
+#define PCI_CHIP_SKYLAKE_WKS_GT4 0x193D
+
+#define PCI_CHIP_KABYLAKE_ULT_GT2 0x5916
+#define PCI_CHIP_KABYLAKE_ULT_GT1_5 0x5913
+#define PCI_CHIP_KABYLAKE_ULT_GT1 0x5906
+#define PCI_CHIP_KABYLAKE_ULT_GT3_0 0x5923
+#define PCI_CHIP_KABYLAKE_ULT_GT3_1 0x5926
+#define PCI_CHIP_KABYLAKE_ULT_GT3_2 0x5927
+#define PCI_CHIP_KABYLAKE_ULT_GT2F 0x5921
+#define PCI_CHIP_KABYLAKE_ULX_GT1_5 0x5915
+#define PCI_CHIP_KABYLAKE_ULX_GT1 0x590E
+#define PCI_CHIP_KABYLAKE_ULX_GT2 0x591E
+#define PCI_CHIP_KABYLAKE_DT_GT2 0x5912
+#define PCI_CHIP_KABYLAKE_DT_GT1_5 0x5917
+#define PCI_CHIP_KABYLAKE_DT_GT1 0x5902
+#define PCI_CHIP_KABYLAKE_HALO_GT2 0x591B
+#define PCI_CHIP_KABYLAKE_HALO_GT4 0x593B
+#define PCI_CHIP_KABYLAKE_HALO_GT1_0 0x5908
+#define PCI_CHIP_KABYLAKE_HALO_GT1_1 0x590B
+#define PCI_CHIP_KABYLAKE_SRV_GT2 0x591A
+#define PCI_CHIP_KABYLAKE_SRV_GT1 0x590A
+#define PCI_CHIP_KABYLAKE_WKS_GT2 0x591D
+
+#define PCI_CHIP_BROXTON_0 0x0A84
+#define PCI_CHIP_BROXTON_1 0x1A84
+#define PCI_CHIP_BROXTON_2 0x5A84
+#define PCI_CHIP_BROXTON_3 0x1A85
+#define PCI_CHIP_BROXTON_4 0x5A85
+
+#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \
+ (devid) == PCI_CHIP_I915_GM || \
+ (devid) == PCI_CHIP_I945_GM || \
+ (devid) == PCI_CHIP_I945_GME || \
+ (devid) == PCI_CHIP_I965_GM || \
+ (devid) == PCI_CHIP_I965_GME || \
+ (devid) == PCI_CHIP_GM45_GM || IS_IGD(devid) || \
+ (devid) == PCI_CHIP_IVYBRIDGE_M_GT1 || \
+ (devid) == PCI_CHIP_IVYBRIDGE_M_GT2)
+
+#define IS_G45(devid) ((devid) == PCI_CHIP_IGD_E_G || \
+ (devid) == PCI_CHIP_Q45_G || \
+ (devid) == PCI_CHIP_G45_G || \
+ (devid) == PCI_CHIP_G41_G)
+#define IS_GM45(devid) ((devid) == PCI_CHIP_GM45_GM)
+#define IS_G4X(devid) (IS_G45(devid) || IS_GM45(devid))
+
+#define IS_ILD(devid) ((devid) == PCI_CHIP_ILD_G)
+#define IS_ILM(devid) ((devid) == PCI_CHIP_ILM_G)
+
+#define IS_915(devid) ((devid) == PCI_CHIP_I915_G || \
+ (devid) == PCI_CHIP_E7221_G || \
+ (devid) == PCI_CHIP_I915_GM)
+
+#define IS_945GM(devid) ((devid) == PCI_CHIP_I945_GM || \
+ (devid) == PCI_CHIP_I945_GME)
+
+#define IS_945(devid) ((devid) == PCI_CHIP_I945_G || \
+ (devid) == PCI_CHIP_I945_GM || \
+ (devid) == PCI_CHIP_I945_GME || \
+ IS_G33(devid))
+
+#define IS_G33(devid) ((devid) == PCI_CHIP_G33_G || \
+ (devid) == PCI_CHIP_Q33_G || \
+ (devid) == PCI_CHIP_Q35_G || IS_IGD(devid))
+
+#define IS_GEN2(devid) ((devid) == PCI_CHIP_I830_M || \
+ (devid) == PCI_CHIP_845_G || \
+ (devid) == PCI_CHIP_I855_GM || \
+ (devid) == PCI_CHIP_I865_G)
+
+#define IS_GEN3(devid) (IS_945(devid) || IS_915(devid))
+
+#define IS_GEN4(devid) ((devid) == PCI_CHIP_I965_G || \
+ (devid) == PCI_CHIP_I965_Q || \
+ (devid) == PCI_CHIP_I965_G_1 || \
+ (devid) == PCI_CHIP_I965_GM || \
+ (devid) == PCI_CHIP_I965_GME || \
+ (devid) == PCI_CHIP_I946_GZ || \
+ IS_G4X(devid))
+
+#define IS_GEN5(devid) (IS_ILD(devid) || IS_ILM(devid))
+
+#define IS_GEN6(devid) ((devid) == PCI_CHIP_SANDYBRIDGE_GT1 || \
+ (devid) == PCI_CHIP_SANDYBRIDGE_GT2 || \
+ (devid) == PCI_CHIP_SANDYBRIDGE_GT2_PLUS || \
+ (devid) == PCI_CHIP_SANDYBRIDGE_M_GT1 || \
+ (devid) == PCI_CHIP_SANDYBRIDGE_M_GT2 || \
+ (devid) == PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS || \
+ (devid) == PCI_CHIP_SANDYBRIDGE_S)
+
+#define IS_GEN7(devid) (IS_IVYBRIDGE(devid) || \
+ IS_HASWELL(devid) || \
+ IS_VALLEYVIEW(devid))
+
+#define IS_IVYBRIDGE(devid) ((devid) == PCI_CHIP_IVYBRIDGE_GT1 || \
+ (devid) == PCI_CHIP_IVYBRIDGE_GT2 || \
+ (devid) == PCI_CHIP_IVYBRIDGE_M_GT1 || \
+ (devid) == PCI_CHIP_IVYBRIDGE_M_GT2 || \
+ (devid) == PCI_CHIP_IVYBRIDGE_S || \
+ (devid) == PCI_CHIP_IVYBRIDGE_S_GT2)
+
+#define IS_VALLEYVIEW(devid) ((devid) == PCI_CHIP_VALLEYVIEW_PO || \
+ (devid) == PCI_CHIP_VALLEYVIEW_1 || \
+ (devid) == PCI_CHIP_VALLEYVIEW_2 || \
+ (devid) == PCI_CHIP_VALLEYVIEW_3)
+
+#define IS_HSW_GT1(devid) ((devid) == PCI_CHIP_HASWELL_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_M_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_S_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_B_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_E_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_M_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_S_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_B_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_E_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_M_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_S_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_B_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_E_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_M_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_S_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_B_GT1 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_E_GT1)
+#define IS_HSW_GT2(devid) ((devid) == PCI_CHIP_HASWELL_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_M_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_S_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_B_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_E_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_M_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_S_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_B_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_E_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_M_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_S_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_B_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_E_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_M_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_S_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_B_GT2 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_E_GT2)
+#define IS_HSW_GT3(devid) ((devid) == PCI_CHIP_HASWELL_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_M_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_S_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_B_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_E_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_M_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_S_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_B_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_SDV_E_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_M_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_S_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_B_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_ULT_E_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_M_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_S_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_B_GT3 || \
+ (devid) == PCI_CHIP_HASWELL_CRW_E_GT3)
+
+#define IS_HASWELL(devid) (IS_HSW_GT1(devid) || \
+ IS_HSW_GT2(devid) || \
+ IS_HSW_GT3(devid))
+
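+/* Broadwell ids are matched by pattern rather than enumerated: assuming
+ * the usual 0x16xx layout, bits 7:4 carry the GT level (0 = GT1, 1 = GT2,
+ * 2 = GT3) and bits 3:0 carry one of the BDW_* SKU nibbles defined above.
+ * A worked example:
+ *
+ *   devid = 0x1616: (devid & 0xff00) == 0x1600 -> Broadwell family
+ *                   (devid & 0x00f0) >> 4 == 1 -> GT2
+ *                   (devid & 0x000f) == 0x6    -> BDW_ULT
+ */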
+#define IS_BROADWELL(devid) (((devid & 0xff00) != 0x1600) ? 0 : \
+ (((devid & 0x00f0) >> 4) > 3) ? 0 : \
+ ((devid & 0x000f) == BDW_SPARE) ? 1 : \
+ ((devid & 0x000f) == BDW_ULT) ? 1 : \
+ ((devid & 0x000f) == BDW_IRIS) ? 1 : \
+ ((devid & 0x000f) == BDW_SERVER) ? 1 : \
+ ((devid & 0x000f) == BDW_WORKSTATION) ? 1 : \
+ ((devid & 0x000f) == BDW_ULX) ? 1 : 0)
+
+#define IS_CHERRYVIEW(devid) ((devid) == PCI_CHIP_CHERRYVIEW_0 || \
+ (devid) == PCI_CHIP_CHERRYVIEW_1 || \
+ (devid) == PCI_CHIP_CHERRYVIEW_2 || \
+ (devid) == PCI_CHIP_CHERRYVIEW_3)
+
+#define IS_GEN8(devid) (IS_BROADWELL(devid) || \
+ IS_CHERRYVIEW(devid))
+
+#define IS_SKL_GT1(devid) ((devid) == PCI_CHIP_SKYLAKE_DT_GT1 || \
+ (devid) == PCI_CHIP_SKYLAKE_ULT_GT1 || \
+ (devid) == PCI_CHIP_SKYLAKE_SRV_GT1 || \
+ (devid) == PCI_CHIP_SKYLAKE_H_GT1 || \
+ (devid) == PCI_CHIP_SKYLAKE_ULX_GT1)
+
+#define IS_SKL_GT2(devid) ((devid) == PCI_CHIP_SKYLAKE_DT_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_FUSED0_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_FUSED1_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_ULT_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_FUSED2_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_SRV_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_HALO_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_WKS_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_ULX_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_MOBILE_GT2)
+
+#define IS_SKL_GT3(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT3_0 || \
+ (devid) == PCI_CHIP_SKYLAKE_ULT_GT3_1 || \
+ (devid) == PCI_CHIP_SKYLAKE_ULT_GT3_2 || \
+ (devid) == PCI_CHIP_SKYLAKE_HALO_GT3 || \
+ (devid) == PCI_CHIP_SKYLAKE_SRV_GT3)
+
+#define IS_SKL_GT4(devid) ((devid) == PCI_CHIP_SKYLAKE_SRV_GT4 || \
+ (devid) == PCI_CHIP_SKYLAKE_DT_GT4 || \
+ (devid) == PCI_CHIP_SKYLAKE_SRV_GT4X || \
+ (devid) == PCI_CHIP_SKYLAKE_H_GT4 || \
+ (devid) == PCI_CHIP_SKYLAKE_WKS_GT4)
+
+#define IS_KBL_GT1(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT1_5 || \
+ (devid) == PCI_CHIP_KABYLAKE_ULX_GT1_5 || \
+ (devid) == PCI_CHIP_KABYLAKE_DT_GT1_5 || \
+ (devid) == PCI_CHIP_KABYLAKE_ULT_GT1 || \
+ (devid) == PCI_CHIP_KABYLAKE_ULX_GT1 || \
+ (devid) == PCI_CHIP_KABYLAKE_DT_GT1 || \
+ (devid) == PCI_CHIP_KABYLAKE_HALO_GT1_0 || \
+ (devid) == PCI_CHIP_KABYLAKE_HALO_GT1_1 || \
+ (devid) == PCI_CHIP_KABYLAKE_SRV_GT1)
+
+#define IS_KBL_GT2(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT2 || \
+ (devid) == PCI_CHIP_KABYLAKE_ULT_GT2F || \
+ (devid) == PCI_CHIP_KABYLAKE_ULX_GT2 || \
+ (devid) == PCI_CHIP_KABYLAKE_DT_GT2 || \
+ (devid) == PCI_CHIP_KABYLAKE_HALO_GT2 || \
+ (devid) == PCI_CHIP_KABYLAKE_SRV_GT2 || \
+ (devid) == PCI_CHIP_KABYLAKE_WKS_GT2)
+
+#define IS_KBL_GT3(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT3_0 || \
+ (devid) == PCI_CHIP_KABYLAKE_ULT_GT3_1 || \
+ (devid) == PCI_CHIP_KABYLAKE_ULT_GT3_2)
+
+#define IS_KBL_GT4(devid) ((devid) == PCI_CHIP_KABYLAKE_HALO_GT4)
+
+#define IS_KABYLAKE(devid) (IS_KBL_GT1(devid) || \
+ IS_KBL_GT2(devid) || \
+ IS_KBL_GT3(devid) || \
+ IS_KBL_GT4(devid))
+
+#define IS_SKYLAKE(devid) (IS_SKL_GT1(devid) || \
+ IS_SKL_GT2(devid) || \
+ IS_SKL_GT3(devid) || \
+ IS_SKL_GT4(devid))
+
+#define IS_BROXTON(devid) ((devid) == PCI_CHIP_BROXTON_0 || \
+ (devid) == PCI_CHIP_BROXTON_1 || \
+ (devid) == PCI_CHIP_BROXTON_2 || \
+ (devid) == PCI_CHIP_BROXTON_3 || \
+ (devid) == PCI_CHIP_BROXTON_4)
+
+#define IS_GEN9(devid) (IS_SKYLAKE(devid) || \
+ IS_BROXTON(devid) || \
+ IS_KABYLAKE(devid))
+
+#define IS_9XX(dev) (IS_GEN3(dev) || \
+ IS_GEN4(dev) || \
+ IS_GEN5(dev) || \
+ IS_GEN6(dev) || \
+ IS_GEN7(dev) || \
+ IS_GEN8(dev) || \
+ IS_GEN9(dev))
+
+
+#endif /* _INTEL_CHIPSET_H */
diff --git a/chromium/third_party/libdrm/src/intel/intel_debug.h b/chromium/third_party/libdrm/src/intel/intel_debug.h
new file mode 100644
index 00000000000..fa0737c6ed6
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel_debug.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#ifndef INTEL_DEBUG_H
+#define INTEL_DEBUG_H
+
+#include <stdint.h>
+
+#define SHADER_DEBUG_SOCKET "/var/run/gen_debug"
+#define DEBUG_HANDSHAKE_VERSION 0x3
+#define DEBUG_HANDSHAKE_ACK "okay"
+
+/* First byte must always be the 1 byte version */
+struct intel_debug_handshake {
+ uint32_t version;
+ int flink_handle;
+ uint32_t per_thread_scratch;
+} __attribute__((packed));
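+
+/* A minimal consumer sketch (illustrative only; the transport beyond this
+ * struct is defined by the debugger side). Assumes fd is a connected
+ * AF_UNIX socket on SHADER_DEBUG_SOCKET:
+ *
+ *   struct intel_debug_handshake hs;
+ *   if (read(fd, &hs, sizeof(hs)) == sizeof(hs) &&
+ *       hs.version == DEBUG_HANDSHAKE_VERSION)
+ *           write(fd, DEBUG_HANDSHAKE_ACK, sizeof(DEBUG_HANDSHAKE_ACK));
+ */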
+
+#endif
diff --git a/chromium/third_party/libdrm/src/intel/intel_decode.c b/chromium/third_party/libdrm/src/intel/intel_decode.c
new file mode 100644
index 00000000000..803d20293ff
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/intel_decode.c
@@ -0,0 +1,3983 @@
+/*
+ * Copyright © 2009-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "intel_chipset.h"
+#include "intel_bufmgr.h"
+
+
+/* Struct for tracking drm_intel_decode state. */
+struct drm_intel_decode {
+ /** stdio file where the output should land. Defaults to stdout. */
+ FILE *out;
+
+ /** PCI device ID. */
+ uint32_t devid;
+
+ /**
+ * Shorthand device identifier: 3 is 915, 4 is 965, 5 is
+ * Ironlake, etc.
+ */
+ int gen;
+
+ /** GPU address of the start of the current packet. */
+ uint32_t hw_offset;
+ /** CPU virtual address of the start of the current packet. */
+ uint32_t *data;
+ /** DWORDs of remaining batchbuffer data starting from the packet. */
+ uint32_t count;
+
+ /** GPU address of the start of the batchbuffer data. */
+ uint32_t base_hw_offset;
+ /** CPU virtual address of the start of the batchbuffer data. */
+ uint32_t *base_data;
+ /** Number of DWORDs of batchbuffer data. */
+ uint32_t base_count;
+
+ /** @{
+ * GPU head and tail pointers, which will be noted in the dump, or ~0.
+ */
+ uint32_t head, tail;
+ /** @} */
+
+ /**
+ * Whether to dump the dwords after MI_BATCH_BUFFER_END.
+ *
+ * This sometimes provides clues in corrupted batchbuffers,
+ * and is used by intel-gpu-tools.
+ */
+ bool dump_past_end;
+
+ bool overflowed;
+};
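+
+/* Typical driving sequence for this context, as a sketch (the public
+ * entry points are declared in intel_bufmgr.h):
+ *
+ *   struct drm_intel_decode *ctx = drm_intel_decode_context_alloc(devid);
+ *   drm_intel_decode_set_batch_pointer(ctx, batch_ptr, batch_gpu_addr,
+ *                                      batch_dword_count);
+ *   drm_intel_decode(ctx);
+ *   drm_intel_decode_context_free(ctx);
+ */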
+
+static FILE *out;
+static uint32_t saved_s2 = 0, saved_s4 = 0;
+static char saved_s2_set = 0, saved_s4_set = 0;
+static uint32_t head_offset = 0xffffffff; /* undefined */
+static uint32_t tail_offset = 0xffffffff; /* undefined */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(A) (sizeof(A)/sizeof(A[0]))
+#endif
+
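+/* Bail out of a decoder when a packet claims more dwords than remain in
+ * the buffer; the do { } while (0) wrapper makes the macro usable as a
+ * single statement, e.g. in an unbraced if. */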
+#define BUFFER_FAIL(_count, _len, _name) do { \
+ fprintf(out, "Buffer size too small in %s (%d < %d)\n", \
+ (_name), (_count), (_len)); \
+ return _count; \
+} while (0)
+
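+/* Reinterpret the bits of a dword as an IEEE-754 float; punning through
+ * a union (rather than casting a pointer) avoids strict-aliasing
+ * problems. */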
+static float int_as_float(uint32_t intval)
+{
+ union intfloat {
+ uint32_t i;
+ float f;
+ } uval;
+
+ uval.i = intval;
+ return uval.f;
+}
+
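+/* Print one dword of the current packet, prefixed with its GPU address
+ * and a HEAD/TAIL marker when the address matches the ring pointers;
+ * DRM_PRINTFLIKE(3, 4) lets the compiler type-check the format
+ * arguments. */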
+static void DRM_PRINTFLIKE(3, 4)
+instr_out(struct drm_intel_decode *ctx, unsigned int index,
+ const char *fmt, ...)
+{
+ va_list va;
+ const char *parseinfo;
+ uint32_t offset = ctx->hw_offset + index * 4;
+
+ if (index > ctx->count) {
+ if (!ctx->overflowed) {
+ fprintf(out, "ERROR: Decode attempted to continue beyond end of batchbuffer\n");
+ ctx->overflowed = true;
+ }
+ return;
+ }
+
+ if (offset == head_offset)
+ parseinfo = "HEAD";
+ else if (offset == tail_offset)
+ parseinfo = "TAIL";
+ else
+ parseinfo = " ";
+
+ fprintf(out, "0x%08x: %s 0x%08x: %s", offset, parseinfo,
+ ctx->data[index], index == 0 ? "" : " ");
+ va_start(va, fmt);
+ vfprintf(out, fmt, va);
+ va_end(va);
+}
+
+static int
+decode_MI_SET_CONTEXT(struct drm_intel_decode *ctx)
+{
+ uint32_t data = ctx->data[1];
+ if (ctx->gen > 7)
+ return 1;
+
+ instr_out(ctx, 0, "MI_SET_CONTEXT\n");
+ instr_out(ctx, 1, "gtt offset = 0x%x%s%s\n",
+ data & ~0xfff,
+ data & (1<<1)? ", Force Restore": "",
+ data & (1<<0)? ", Restore Inhibit": "");
+
+ return 2;
+}
+
+static int
+decode_MI_WAIT_FOR_EVENT(struct drm_intel_decode *ctx)
+{
+ const char *cc_wait;
+ int cc_shift = 0;
+ uint32_t data = ctx->data[0];
+
+ if (ctx->gen <= 5)
+ cc_shift = 9;
+ else
+ cc_shift = 16;
+
+ switch ((data >> cc_shift) & 0x1f) {
+ case 1:
+ cc_wait = ", cc wait 1";
+ break;
+ case 2:
+ cc_wait = ", cc wait 2";
+ break;
+ case 3:
+ cc_wait = ", cc wait 3";
+ break;
+ case 4:
+ cc_wait = ", cc wait 4";
+ break;
+ case 5:
+ cc_wait = ", cc wait 5";
+ break;
+ default:
+ cc_wait = "";
+ break;
+ }
+
+ if (ctx->gen <= 5) {
+ instr_out(ctx, 0, "MI_WAIT_FOR_EVENT%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ data & (1<<18)? ", pipe B start vblank wait": "",
+ data & (1<<17)? ", pipe A start vblank wait": "",
+ data & (1<<16)? ", overlay flip pending wait": "",
+ data & (1<<14)? ", pipe B hblank wait": "",
+ data & (1<<13)? ", pipe A hblank wait": "",
+ cc_wait,
+ data & (1<<8)? ", plane C pending flip wait": "",
+ data & (1<<7)? ", pipe B vblank wait": "",
+ data & (1<<6)? ", plane B pending flip wait": "",
+ data & (1<<5)? ", pipe B scan line wait": "",
+ data & (1<<4)? ", fbc idle wait": "",
+ data & (1<<3)? ", pipe A vblank wait": "",
+ data & (1<<2)? ", plane A pending flip wait": "",
+ data & (1<<1)? ", plane A scan line wait": "");
+ } else {
+ instr_out(ctx, 0, "MI_WAIT_FOR_EVENT%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ data & (1<<20)? ", sprite C pending flip wait": "", /* ivb */
+ cc_wait,
+ data & (1<<13)? ", pipe B hblank wait": "",
+ data & (1<<11)? ", pipe B vblank wait": "",
+ data & (1<<10)? ", sprite B pending flip wait": "",
+ data & (1<<9)? ", plane B pending flip wait": "",
+ data & (1<<8)? ", plane B scan line wait": "",
+ data & (1<<5)? ", pipe A hblank wait": "",
+ data & (1<<3)? ", pipe A vblank wait": "",
+ data & (1<<2)? ", sprite A pending flip wait": "",
+ data & (1<<1)? ", plane A pending flip wait": "",
+ data & (1<<0)? ", plane A scan line wait": "");
+ }
+
+ return 1;
+}
+
+static int
+decode_mi(struct drm_intel_decode *ctx)
+{
+ unsigned int opcode, len = -1;
+ const char *post_sync_op = "";
+ uint32_t *data = ctx->data;
+
+ struct {
+ uint32_t opcode;
+ int len_mask;
+ unsigned int min_len;
+ unsigned int max_len;
+ const char *name;
+ int (*func)(struct drm_intel_decode *ctx);
+ } opcodes_mi[] = {
+ { 0x08, 0, 1, 1, "MI_ARB_ON_OFF" },
+ { 0x0a, 0, 1, 1, "MI_BATCH_BUFFER_END" },
+ { 0x30, 0x3f, 3, 3, "MI_BATCH_BUFFER" },
+ { 0x31, 0x3f, 2, 2, "MI_BATCH_BUFFER_START" },
+ { 0x14, 0x3f, 3, 3, "MI_DISPLAY_BUFFER_INFO" },
+ { 0x04, 0, 1, 1, "MI_FLUSH" },
+ { 0x22, 0x1f, 3, 3, "MI_LOAD_REGISTER_IMM" },
+ { 0x13, 0x3f, 2, 2, "MI_LOAD_SCAN_LINES_EXCL" },
+ { 0x12, 0x3f, 2, 2, "MI_LOAD_SCAN_LINES_INCL" },
+ { 0x00, 0, 1, 1, "MI_NOOP" },
+ { 0x11, 0x3f, 2, 2, "MI_OVERLAY_FLIP" },
+ { 0x07, 0, 1, 1, "MI_REPORT_HEAD" },
+ { 0x18, 0x3f, 2, 2, "MI_SET_CONTEXT", decode_MI_SET_CONTEXT },
+ { 0x20, 0x3f, 3, 4, "MI_STORE_DATA_IMM" },
+ { 0x21, 0x3f, 3, 4, "MI_STORE_DATA_INDEX" },
+ { 0x24, 0x3f, 3, 3, "MI_STORE_REGISTER_MEM" },
+ { 0x02, 0, 1, 1, "MI_USER_INTERRUPT" },
+ { 0x03, 0, 1, 1, "MI_WAIT_FOR_EVENT", decode_MI_WAIT_FOR_EVENT },
+ { 0x16, 0x7f, 3, 3, "MI_SEMAPHORE_MBOX" },
+ { 0x26, 0x1f, 3, 4, "MI_FLUSH_DW" },
+ { 0x28, 0x3f, 3, 3, "MI_REPORT_PERF_COUNT" },
+ { 0x29, 0xff, 3, 3, "MI_LOAD_REGISTER_MEM" },
+ { 0x0b, 0, 1, 1, "MI_SUSPEND_FLUSH"},
+ }, *opcode_mi = NULL;
+
+ /* check instruction length */
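+ /* The MI opcode lives in bits 28:23 of the header dword and, for
+  * variable-length commands, (len - 2) sits in the bits covered by
+  * len_mask. A worked example: a header of 0x10800001 gives
+  * (0x10800001 & 0x1f800000) >> 23 == 0x21 (MI_STORE_DATA_INDEX)
+  * and (0x10800001 & 0x3f) + 2 == 3 dwords in total. */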
+ for (opcode = 0; opcode < sizeof(opcodes_mi) / sizeof(opcodes_mi[0]);
+ opcode++) {
+ if ((data[0] & 0x1f800000) >> 23 == opcodes_mi[opcode].opcode) {
+ len = 1;
+ if (opcodes_mi[opcode].max_len > 1) {
+ len =
+ (data[0] & opcodes_mi[opcode].len_mask) + 2;
+ if (len < opcodes_mi[opcode].min_len
+ || len > opcodes_mi[opcode].max_len) {
+ fprintf(out,
+ "Bad length (%d) in %s, [%d, %d]\n",
+ len, opcodes_mi[opcode].name,
+ opcodes_mi[opcode].min_len,
+ opcodes_mi[opcode].max_len);
+ }
+ }
+ opcode_mi = &opcodes_mi[opcode];
+ break;
+ }
+ }
+
+ if (opcode_mi && opcode_mi->func)
+ return opcode_mi->func(ctx);
+
+ switch ((data[0] & 0x1f800000) >> 23) {
+ case 0x0a:
+ instr_out(ctx, 0, "MI_BATCH_BUFFER_END\n");
+ return -1;
+ case 0x16:
+ instr_out(ctx, 0, "MI_SEMAPHORE_MBOX%s%s%s%s %u\n",
+ data[0] & (1 << 22) ? " global gtt," : "",
+ data[0] & (1 << 21) ? " update semaphore," : "",
+ data[0] & (1 << 20) ? " compare semaphore," : "",
+ data[0] & (1 << 18) ? " use compare reg" : "",
+ (data[0] & (0x3 << 16)) >> 16);
+ instr_out(ctx, 1, "value\n");
+ instr_out(ctx, 2, "address\n");
+ return len;
+ case 0x21:
+ instr_out(ctx, 0, "MI_STORE_DATA_INDEX%s\n",
+ data[0] & (1 << 21) ? " use per-process HWS," : "");
+ instr_out(ctx, 1, "index\n");
+ instr_out(ctx, 2, "dword\n");
+ if (len == 4)
+ instr_out(ctx, 3, "upper dword\n");
+ return len;
+ case 0x00:
+ if (data[0] & (1 << 22))
+ instr_out(ctx, 0,
+ "MI_NOOP write NOPID reg, val=0x%x\n",
+ data[0] & ((1 << 22) - 1));
+ else
+ instr_out(ctx, 0, "MI_NOOP\n");
+ return len;
+ case 0x26:
+ switch (data[0] & (0x3 << 14)) {
+ case (0 << 14):
+ post_sync_op = "no write";
+ break;
+ case (1 << 14):
+ post_sync_op = "write data";
+ break;
+ case (2 << 14):
+ post_sync_op = "reserved";
+ break;
+ case (3 << 14):
+ post_sync_op = "write TIMESTAMP";
+ break;
+ }
+ instr_out(ctx, 0,
+ "MI_FLUSH_DW%s%s%s%s post_sync_op='%s' %s%s\n",
+ data[0] & (1 << 22) ?
+ " enable protected mem (BCS-only)," : "",
+ data[0] & (1 << 21) ? " store in hws," : "",
+ data[0] & (1 << 18) ? " invalidate tlb," : "",
+ data[0] & (1 << 17) ? " flush gfdt," : "",
+ post_sync_op,
+ data[0] & (1 << 8) ? " enable notify interrupt," : "",
+ data[0] & (1 << 7) ?
+ " invalidate video state (BCS-only)," : "");
+ if (data[0] & (1 << 21))
+ instr_out(ctx, 1, "hws index\n");
+ else
+ instr_out(ctx, 1, "address\n");
+ instr_out(ctx, 2, "dword\n");
+ if (len == 4)
+ instr_out(ctx, 3, "upper dword\n");
+ return len;
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_mi) / sizeof(opcodes_mi[0]);
+ opcode++) {
+ if ((data[0] & 0x1f800000) >> 23 == opcodes_mi[opcode].opcode) {
+ unsigned int i;
+
+ instr_out(ctx, 0, "%s\n",
+ opcodes_mi[opcode].name);
+ for (i = 1; i < len; i++) {
+ instr_out(ctx, i, "dword %d\n", i);
+ }
+
+ return len;
+ }
+ }
+
+ instr_out(ctx, 0, "MI UNKNOWN\n");
+ return 1;
+}
+
+static void
+decode_2d_br00(struct drm_intel_decode *ctx, const char *cmd)
+{
+ instr_out(ctx, 0,
+ "%s (rgb %sabled, alpha %sabled, src tile %d, dst tile %d)\n",
+ cmd,
+ (ctx->data[0] & (1 << 20)) ? "en" : "dis",
+ (ctx->data[0] & (1 << 21)) ? "en" : "dis",
+ (ctx->data[0] >> 15) & 1,
+ (ctx->data[0] >> 11) & 1);
+}
+
+static void
+decode_2d_br01(struct drm_intel_decode *ctx)
+{
+ const char *format;
+ switch ((ctx->data[1] >> 24) & 0x3) {
+ case 0:
+ format = "8";
+ break;
+ case 1:
+ format = "565";
+ break;
+ case 2:
+ format = "1555";
+ break;
+ case 3:
+ format = "8888";
+ break;
+ }
+
+ instr_out(ctx, 1,
+ "format %s, pitch %d, rop 0x%02x, "
+ "clipping %sabled, %s%s \n",
+ format,
+ (short)(ctx->data[1] & 0xffff),
+ (ctx->data[1] >> 16) & 0xff,
+ ctx->data[1] & (1 << 30) ? "en" : "dis",
+ ctx->data[1] & (1 << 31) ? "solid pattern enabled, " : "",
+ ctx->data[1] & (1 << 31) ?
+ "mono pattern transparency enabled, " : "");
+
+}
+
+static int
+decode_2d(struct drm_intel_decode *ctx)
+{
+ unsigned int opcode, len;
+ uint32_t *data = ctx->data;
+
+ struct {
+ uint32_t opcode;
+ unsigned int min_len;
+ unsigned int max_len;
+ const char *name;
+ } opcodes_2d[] = {
+ { 0x40, 5, 5, "COLOR_BLT" },
+ { 0x43, 6, 6, "SRC_COPY_BLT" },
+ { 0x01, 8, 8, "XY_SETUP_BLT" },
+ { 0x11, 9, 9, "XY_SETUP_MONO_PATTERN_SL_BLT" },
+ { 0x03, 3, 3, "XY_SETUP_CLIP_BLT" },
+ { 0x24, 2, 2, "XY_PIXEL_BLT" },
+ { 0x25, 3, 3, "XY_SCANLINES_BLT" },
+ { 0x26, 4, 4, "XY_TEXT_BLT" },
+ { 0x31, 5, 134, "XY_TEXT_IMMEDIATE_BLT" },
+ { 0x50, 6, 6, "XY_COLOR_BLT" },
+ { 0x51, 6, 6, "XY_PAT_BLT" },
+ { 0x76, 8, 8, "XY_PAT_CHROMA_BLT" },
+ { 0x72, 7, 135, "XY_PAT_BLT_IMMEDIATE" },
+ { 0x77, 9, 137, "XY_PAT_CHROMA_BLT_IMMEDIATE" },
+ { 0x52, 9, 9, "XY_MONO_PAT_BLT" },
+ { 0x59, 7, 7, "XY_MONO_PAT_FIXED_BLT" },
+ { 0x53, 8, 8, "XY_SRC_COPY_BLT" },
+ { 0x54, 8, 8, "XY_MONO_SRC_COPY_BLT" },
+ { 0x71, 9, 137, "XY_MONO_SRC_COPY_IMMEDIATE_BLT" },
+ { 0x55, 9, 9, "XY_FULL_BLT" },
+ { 0x55, 9, 137, "XY_FULL_IMMEDIATE_PATTERN_BLT" },
+ { 0x56, 9, 9, "XY_FULL_MONO_SRC_BLT" },
+ { 0x75, 10, 138, "XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT" },
+ { 0x57, 12, 12, "XY_FULL_MONO_PATTERN_BLT" },
+ { 0x58, 12, 12, "XY_FULL_MONO_PATTERN_MONO_SRC_BLT"},
+ };
+
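+ /* Blitter headers keep their opcode in bits 28:22 and, for
+  * variable-length commands, (len - 2) in the low byte; e.g. a header
+  * of 0x54000004 decodes as opcode 0x50 (XY_COLOR_BLT) with
+  * 0x04 + 2 == 6 dwords. */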
+ switch ((data[0] & 0x1fc00000) >> 22) {
+ case 0x25:
+ instr_out(ctx, 0,
+ "XY_SCANLINES_BLT (pattern seed (%d, %d), dst tile %d)\n",
+ (data[0] >> 12) & 0x8,
+ (data[0] >> 8) & 0x8, (data[0] >> 11) & 1);
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 3)
+ fprintf(out, "Bad count in XY_SCANLINES_BLT\n");
+
+ instr_out(ctx, 1, "dest (%d,%d)\n",
+ data[1] & 0xffff, data[1] >> 16);
+ instr_out(ctx, 2, "dest (%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ return len;
+ case 0x01:
+ decode_2d_br00(ctx, "XY_SETUP_BLT");
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 8)
+ fprintf(out, "Bad count in XY_SETUP_BLT\n");
+
+ decode_2d_br01(ctx);
+ instr_out(ctx, 2, "cliprect (%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ instr_out(ctx, 3, "cliprect (%d,%d)\n",
+ data[3] & 0xffff, data[3] >> 16);
+ instr_out(ctx, 4, "setup dst offset 0x%08x\n",
+ data[4]);
+ instr_out(ctx, 5, "setup background color\n");
+ instr_out(ctx, 6, "setup foreground color\n");
+ instr_out(ctx, 7, "color pattern offset\n");
+ return len;
+ case 0x03:
+ decode_2d_br00(ctx, "XY_SETUP_CLIP_BLT");
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 3)
+ fprintf(out, "Bad count in XY_SETUP_CLIP_BLT\n");
+
+ instr_out(ctx, 1, "cliprect (%d,%d)\n",
+ data[1] & 0xffff, data[1] >> 16);
+ instr_out(ctx, 2, "cliprect (%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ return len;
+ case 0x11:
+ decode_2d_br00(ctx, "XY_SETUP_MONO_PATTERN_SL_BLT");
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 9)
+ fprintf(out,
+ "Bad count in XY_SETUP_MONO_PATTERN_SL_BLT\n");
+
+ decode_2d_br01(ctx);
+ instr_out(ctx, 2, "cliprect (%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ instr_out(ctx, 3, "cliprect (%d,%d)\n",
+ data[3] & 0xffff, data[3] >> 16);
+ instr_out(ctx, 4, "setup dst offset 0x%08x\n",
+ data[4]);
+ instr_out(ctx, 5, "setup background color\n");
+ instr_out(ctx, 6, "setup foreground color\n");
+ instr_out(ctx, 7, "mono pattern dw0\n");
+ instr_out(ctx, 8, "mono pattern dw1\n");
+ return len;
+ case 0x50:
+ decode_2d_br00(ctx, "XY_COLOR_BLT");
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 6)
+ fprintf(out, "Bad count in XY_COLOR_BLT\n");
+
+ decode_2d_br01(ctx);
+ instr_out(ctx, 2, "(%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ instr_out(ctx, 3, "(%d,%d)\n",
+ data[3] & 0xffff, data[3] >> 16);
+ instr_out(ctx, 4, "offset 0x%08x\n", data[4]);
+ instr_out(ctx, 5, "color\n");
+ return len;
+ case 0x53:
+ decode_2d_br00(ctx, "XY_SRC_COPY_BLT");
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 8)
+ fprintf(out, "Bad count in XY_SRC_COPY_BLT\n");
+
+ decode_2d_br01(ctx);
+ instr_out(ctx, 2, "dst (%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ instr_out(ctx, 3, "dst (%d,%d)\n",
+ data[3] & 0xffff, data[3] >> 16);
+ instr_out(ctx, 4, "dst offset 0x%08x\n", data[4]);
+ instr_out(ctx, 5, "src (%d,%d)\n",
+ data[5] & 0xffff, data[5] >> 16);
+ instr_out(ctx, 6, "src pitch %d\n",
+ (short)(data[6] & 0xffff));
+ instr_out(ctx, 7, "src offset 0x%08x\n", data[7]);
+ return len;
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_2d) / sizeof(opcodes_2d[0]);
+ opcode++) {
+ if ((data[0] & 0x1fc00000) >> 22 == opcodes_2d[opcode].opcode) {
+ unsigned int i;
+
+ len = 1;
+ instr_out(ctx, 0, "%s\n",
+ opcodes_2d[opcode].name);
+ if (opcodes_2d[opcode].max_len > 1) {
+ len = (data[0] & 0x000000ff) + 2;
+ if (len < opcodes_2d[opcode].min_len ||
+ len > opcodes_2d[opcode].max_len) {
+ fprintf(out, "Bad count in %s\n",
+ opcodes_2d[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ instr_out(ctx, i, "dword %d\n", i);
+ }
+
+ return len;
+ }
+ }
+
+ instr_out(ctx, 0, "2D UNKNOWN\n");
+ return 1;
+}
+
+static int
+decode_3d_1c(struct drm_intel_decode *ctx)
+{
+ uint32_t *data = ctx->data;
+ uint32_t opcode;
+
+ opcode = (data[0] & 0x00f80000) >> 19;
+
+ switch (opcode) {
+ case 0x11:
+ instr_out(ctx, 0,
+ "3DSTATE_DEPTH_SUBRECTANGLE_DISABLE\n");
+ return 1;
+ case 0x10:
+ instr_out(ctx, 0, "3DSTATE_SCISSOR_ENABLE %s\n",
+ data[0] & 1 ? "enabled" : "disabled");
+ return 1;
+ case 0x01:
+ instr_out(ctx, 0, "3DSTATE_MAP_COORD_SET_I830\n");
+ return 1;
+ case 0x0a:
+ instr_out(ctx, 0, "3DSTATE_MAP_CUBE_I830\n");
+ return 1;
+ case 0x05:
+ instr_out(ctx, 0, "3DSTATE_MAP_TEX_STREAM_I830\n");
+ return 1;
+ }
+
+ instr_out(ctx, 0, "3D UNKNOWN: 3d_1c opcode = 0x%x\n",
+ opcode);
+ return 1;
+}
+
+/** Sets the string dstname to describe the destination of the PS instruction */
+static void
+i915_get_instruction_dst(uint32_t *data, int i, char *dstname, int do_mask)
+{
+ uint32_t a0 = data[i];
+ int dst_nr = (a0 >> 14) & 0xf;
+ char dstmask[8];
+ const char *sat;
+
+ if (do_mask) {
+ if (((a0 >> 10) & 0xf) == 0xf) {
+ dstmask[0] = 0;
+ } else {
+ int dstmask_index = 0;
+
+ dstmask[dstmask_index++] = '.';
+ if (a0 & (1 << 10))
+ dstmask[dstmask_index++] = 'x';
+ if (a0 & (1 << 11))
+ dstmask[dstmask_index++] = 'y';
+ if (a0 & (1 << 12))
+ dstmask[dstmask_index++] = 'z';
+ if (a0 & (1 << 13))
+ dstmask[dstmask_index++] = 'w';
+ dstmask[dstmask_index++] = 0;
+ }
+
+ if (a0 & (1 << 22))
+ sat = ".sat";
+ else
+ sat = "";
+ } else {
+ dstmask[0] = 0;
+ sat = "";
+ }
+
+ switch ((a0 >> 19) & 0x7) {
+ case 0:
+ if (dst_nr > 15)
+ fprintf(out, "bad destination reg R%d\n", dst_nr);
+ sprintf(dstname, "R%d%s%s", dst_nr, dstmask, sat);
+ break;
+ case 4:
+ if (dst_nr > 0)
+ fprintf(out, "bad destination reg oC%d\n", dst_nr);
+ sprintf(dstname, "oC%s%s", dstmask, sat);
+ break;
+ case 5:
+ if (dst_nr > 0)
+ fprintf(out, "bad destination reg oD%d\n", dst_nr);
+ sprintf(dstname, "oD%s%s", dstmask, sat);
+ break;
+ case 6:
+ if (dst_nr > 3)
+ fprintf(out, "bad destination reg U%d\n", dst_nr);
+ sprintf(dstname, "U%d%s%s", dst_nr, dstmask, sat);
+ break;
+ default:
+ sprintf(dstname, "RESERVED");
+ break;
+ }
+}
+
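+/* Each source channel select is a 4-bit field: the low three bits pick a
+ * component (x/y/z/w) or the constant 0/1, and bit 3 negates it, so a
+ * select of 0x9 reads back as "-y". */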
+static const char *
+i915_get_channel_swizzle(uint32_t select)
+{
+ switch (select & 0x7) {
+ case 0:
+ return (select & 8) ? "-x" : "x";
+ case 1:
+ return (select & 8) ? "-y" : "y";
+ case 2:
+ return (select & 8) ? "-z" : "z";
+ case 3:
+ return (select & 8) ? "-w" : "w";
+ case 4:
+ return (select & 8) ? "-0" : "0";
+ case 5:
+ return (select & 8) ? "-1" : "1";
+ default:
+ return (select & 8) ? "-bad" : "bad";
+ }
+}
+
+static void
+i915_get_instruction_src_name(uint32_t src_type, uint32_t src_nr, char *name)
+{
+ switch (src_type) {
+ case 0:
+ sprintf(name, "R%d", src_nr);
+ if (src_nr > 15)
+ fprintf(out, "bad src reg %s\n", name);
+ break;
+ case 1:
+ if (src_nr < 8)
+ sprintf(name, "T%d", src_nr);
+ else if (src_nr == 8)
+ sprintf(name, "DIFFUSE");
+ else if (src_nr == 9)
+ sprintf(name, "SPECULAR");
+ else if (src_nr == 10)
+ sprintf(name, "FOG");
+ else {
+ fprintf(out, "bad src reg T%d\n", src_nr);
+ sprintf(name, "RESERVED");
+ }
+ break;
+ case 2:
+ sprintf(name, "C%d", src_nr);
+ if (src_nr > 31)
+ fprintf(out, "bad src reg %s\n", name);
+ break;
+ case 4:
+ sprintf(name, "oC");
+ if (src_nr > 0)
+ fprintf(out, "bad src reg oC%d\n", src_nr);
+ break;
+ case 5:
+ sprintf(name, "oD");
+ if (src_nr > 0)
+ fprintf(out, "bad src reg oD%d\n", src_nr);
+ break;
+ case 6:
+ sprintf(name, "U%d", src_nr);
+ if (src_nr > 3)
+ fprintf(out, "bad src reg %s\n", name);
+ break;
+ default:
+ fprintf(out, "bad src reg type %d\n", src_type);
+ sprintf(name, "RESERVED");
+ break;
+ }
+}
+
+static void i915_get_instruction_src0(uint32_t *data, int i, char *srcname)
+{
+ uint32_t a0 = data[i];
+ uint32_t a1 = data[i + 1];
+ int src_nr = (a0 >> 2) & 0x1f;
+ const char *swizzle_x = i915_get_channel_swizzle((a1 >> 28) & 0xf);
+ const char *swizzle_y = i915_get_channel_swizzle((a1 >> 24) & 0xf);
+ const char *swizzle_z = i915_get_channel_swizzle((a1 >> 20) & 0xf);
+ const char *swizzle_w = i915_get_channel_swizzle((a1 >> 16) & 0xf);
+ char swizzle[100];
+
+ i915_get_instruction_src_name((a0 >> 7) & 0x7, src_nr, srcname);
+ sprintf(swizzle, ".%s%s%s%s", swizzle_x, swizzle_y, swizzle_z,
+ swizzle_w);
+ if (strcmp(swizzle, ".xyzw") != 0)
+ strcat(srcname, swizzle);
+}
+
+static void i915_get_instruction_src1(uint32_t *data, int i, char *srcname)
+{
+ uint32_t a1 = data[i + 1];
+ uint32_t a2 = data[i + 2];
+ int src_nr = (a1 >> 8) & 0x1f;
+ const char *swizzle_x = i915_get_channel_swizzle((a1 >> 4) & 0xf);
+ const char *swizzle_y = i915_get_channel_swizzle((a1 >> 0) & 0xf);
+ const char *swizzle_z = i915_get_channel_swizzle((a2 >> 28) & 0xf);
+ const char *swizzle_w = i915_get_channel_swizzle((a2 >> 24) & 0xf);
+ char swizzle[100];
+
+ i915_get_instruction_src_name((a1 >> 13) & 0x7, src_nr, srcname);
+ sprintf(swizzle, ".%s%s%s%s", swizzle_x, swizzle_y, swizzle_z,
+ swizzle_w);
+ if (strcmp(swizzle, ".xyzw") != 0)
+ strcat(srcname, swizzle);
+}
+
+static void i915_get_instruction_src2(uint32_t *data, int i, char *srcname)
+{
+ uint32_t a2 = data[i + 2];
+ int src_nr = (a2 >> 16) & 0x1f;
+ const char *swizzle_x = i915_get_channel_swizzle((a2 >> 12) & 0xf);
+ const char *swizzle_y = i915_get_channel_swizzle((a2 >> 8) & 0xf);
+ const char *swizzle_z = i915_get_channel_swizzle((a2 >> 4) & 0xf);
+ const char *swizzle_w = i915_get_channel_swizzle((a2 >> 0) & 0xf);
+ char swizzle[100];
+
+ i915_get_instruction_src_name((a2 >> 21) & 0x7, src_nr, srcname);
+ sprintf(swizzle, ".%s%s%s%s", swizzle_x, swizzle_y, swizzle_z,
+ swizzle_w);
+ if (strcmp(swizzle, ".xyzw") != 0)
+ strcat(srcname, swizzle);
+}
+
+static void
+i915_get_instruction_addr(uint32_t src_type, uint32_t src_nr, char *name)
+{
+ switch (src_type) {
+ case 0:
+ sprintf(name, "R%d", src_nr);
+ if (src_nr > 15)
+ fprintf(out, "bad src reg %s\n", name);
+ break;
+ case 1:
+ if (src_nr < 8)
+ sprintf(name, "T%d", src_nr);
+ else if (src_nr == 8)
+ sprintf(name, "DIFFUSE");
+ else if (src_nr == 9)
+ sprintf(name, "SPECULAR");
+ else if (src_nr == 10)
+ sprintf(name, "FOG");
+ else {
+ fprintf(out, "bad src reg T%d\n", src_nr);
+ sprintf(name, "RESERVED");
+ }
+ break;
+ case 4:
+ sprintf(name, "oC");
+ if (src_nr > 0)
+ fprintf(out, "bad src reg oC%d\n", src_nr);
+ break;
+ case 5:
+ sprintf(name, "oD");
+ if (src_nr > 0)
+ fprintf(out, "bad src reg oD%d\n", src_nr);
+ break;
+ default:
+ fprintf(out, "bad src reg type %d\n", src_type);
+ sprintf(name, "RESERVED");
+ break;
+ }
+}
+
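+/* Every i915 fragment-program instruction is three dwords long; the
+ * decoded mnemonic is printed against the first dword and the remaining
+ * two are echoed with only the instruction prefix. */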
+static void
+i915_decode_alu1(struct drm_intel_decode *ctx,
+ int i, char *instr_prefix, const char *op_name)
+{
+ char dst[100], src0[100];
+
+ i915_get_instruction_dst(ctx->data, i, dst, 1);
+ i915_get_instruction_src0(ctx->data, i, src0);
+
+ instr_out(ctx, i++, "%s: %s %s, %s\n", instr_prefix,
+ op_name, dst, src0);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+}
+
+static void
+i915_decode_alu2(struct drm_intel_decode *ctx,
+ int i, char *instr_prefix, const char *op_name)
+{
+ char dst[100], src0[100], src1[100];
+
+ i915_get_instruction_dst(ctx->data, i, dst, 1);
+ i915_get_instruction_src0(ctx->data, i, src0);
+ i915_get_instruction_src1(ctx->data, i, src1);
+
+ instr_out(ctx, i++, "%s: %s %s, %s, %s\n", instr_prefix,
+ op_name, dst, src0, src1);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+}
+
+static void
+i915_decode_alu3(struct drm_intel_decode *ctx,
+ int i, char *instr_prefix, const char *op_name)
+{
+ char dst[100], src0[100], src1[100], src2[100];
+
+ i915_get_instruction_dst(ctx->data, i, dst, 1);
+ i915_get_instruction_src0(ctx->data, i, src0);
+ i915_get_instruction_src1(ctx->data, i, src1);
+ i915_get_instruction_src2(ctx->data, i, src2);
+
+ instr_out(ctx, i++, "%s: %s %s, %s, %s, %s\n", instr_prefix,
+ op_name, dst, src0, src1, src2);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+}
+
+static void
+i915_decode_tex(struct drm_intel_decode *ctx, int i,
+ const char *instr_prefix, const char *tex_name)
+{
+ uint32_t t0 = ctx->data[i];
+ uint32_t t1 = ctx->data[i + 1];
+ char dst_name[100];
+ char addr_name[100];
+ int sampler_nr;
+
+ i915_get_instruction_dst(ctx->data, i, dst_name, 0);
+ i915_get_instruction_addr((t1 >> 24) & 0x7,
+ (t1 >> 17) & 0xf, addr_name);
+ sampler_nr = t0 & 0xf;
+
+ instr_out(ctx, i++, "%s: %s %s, S%d, %s\n", instr_prefix,
+ tex_name, dst_name, sampler_nr, addr_name);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+}
+
+static void
+i915_decode_dcl(struct drm_intel_decode *ctx, int i, char *instr_prefix)
+{
+ uint32_t d0 = ctx->data[i];
+ const char *sampletype;
+ int dcl_nr = (d0 >> 14) & 0xf;
+ const char *dcl_x = d0 & (1 << 10) ? "x" : "";
+ const char *dcl_y = d0 & (1 << 11) ? "y" : "";
+ const char *dcl_z = d0 & (1 << 12) ? "z" : "";
+ const char *dcl_w = d0 & (1 << 13) ? "w" : "";
+ char dcl_mask[10];
+
+ switch ((d0 >> 19) & 0x3) {
+ case 1:
+ sprintf(dcl_mask, ".%s%s%s%s", dcl_x, dcl_y, dcl_z, dcl_w);
+ if (strcmp(dcl_mask, ".") == 0)
+ fprintf(out, "bad (empty) dcl mask\n");
+
+ if (dcl_nr > 10)
+ fprintf(out, "bad T%d dcl register number\n", dcl_nr);
+ if (dcl_nr < 8) {
+ if (strcmp(dcl_mask, ".x") != 0 &&
+ strcmp(dcl_mask, ".xy") != 0 &&
+ strcmp(dcl_mask, ".xz") != 0 &&
+ strcmp(dcl_mask, ".w") != 0 &&
+ strcmp(dcl_mask, ".xyzw") != 0) {
+ fprintf(out, "bad T%d.%s dcl mask\n", dcl_nr,
+ dcl_mask);
+ }
+ instr_out(ctx, i++, "%s: DCL T%d%s\n",
+ instr_prefix, dcl_nr, dcl_mask);
+ } else {
+ if (strcmp(dcl_mask, ".xz") == 0)
+ fprintf(out, "errataed bad dcl mask %s\n",
+ dcl_mask);
+ else if (strcmp(dcl_mask, ".xw") == 0)
+ fprintf(out, "errataed bad dcl mask %s\n",
+ dcl_mask);
+ else if (strcmp(dcl_mask, ".xzw") == 0)
+ fprintf(out, "errataed bad dcl mask %s\n",
+ dcl_mask);
+
+ if (dcl_nr == 8) {
+ instr_out(ctx, i++,
+ "%s: DCL DIFFUSE%s\n", instr_prefix,
+ dcl_mask);
+ } else if (dcl_nr == 9) {
+ instr_out(ctx, i++,
+ "%s: DCL SPECULAR%s\n", instr_prefix,
+ dcl_mask);
+ } else if (dcl_nr == 10) {
+ instr_out(ctx, i++,
+ "%s: DCL FOG%s\n", instr_prefix,
+ dcl_mask);
+ }
+ }
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ break;
+ case 3:
+ switch ((d0 >> 22) & 0x3) {
+ case 0:
+ sampletype = "2D";
+ break;
+ case 1:
+ sampletype = "CUBE";
+ break;
+ case 2:
+ sampletype = "3D";
+ break;
+ default:
+ sampletype = "RESERVED";
+ break;
+ }
+ if (dcl_nr > 15)
+ fprintf(out, "bad S%d dcl register number\n", dcl_nr);
+ instr_out(ctx, i++, "%s: DCL S%d %s\n",
+ instr_prefix, dcl_nr, sampletype);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ break;
+ default:
+ instr_out(ctx, i++, "%s: DCL RESERVED%d\n",
+ instr_prefix, dcl_nr);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ }
+}
+
+static void
+i915_decode_instruction(struct drm_intel_decode *ctx,
+ int i, char *instr_prefix)
+{
+ switch ((ctx->data[i] >> 24) & 0x1f) {
+ case 0x0:
+ instr_out(ctx, i++, "%s: NOP\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ break;
+ case 0x01:
+ i915_decode_alu2(ctx, i, instr_prefix, "ADD");
+ break;
+ case 0x02:
+ i915_decode_alu1(ctx, i, instr_prefix, "MOV");
+ break;
+ case 0x03:
+ i915_decode_alu2(ctx, i, instr_prefix, "MUL");
+ break;
+ case 0x04:
+ i915_decode_alu3(ctx, i, instr_prefix, "MAD");
+ break;
+ case 0x05:
+ i915_decode_alu3(ctx, i, instr_prefix, "DP2ADD");
+ break;
+ case 0x06:
+ i915_decode_alu2(ctx, i, instr_prefix, "DP3");
+ break;
+ case 0x07:
+ i915_decode_alu2(ctx, i, instr_prefix, "DP4");
+ break;
+ case 0x08:
+ i915_decode_alu1(ctx, i, instr_prefix, "FRC");
+ break;
+ case 0x09:
+ i915_decode_alu1(ctx, i, instr_prefix, "RCP");
+ break;
+ case 0x0a:
+ i915_decode_alu1(ctx, i, instr_prefix, "RSQ");
+ break;
+ case 0x0b:
+ i915_decode_alu1(ctx, i, instr_prefix, "EXP");
+ break;
+ case 0x0c:
+ i915_decode_alu1(ctx, i, instr_prefix, "LOG");
+ break;
+ case 0x0d:
+ i915_decode_alu2(ctx, i, instr_prefix, "CMP");
+ break;
+ case 0x0e:
+ i915_decode_alu2(ctx, i, instr_prefix, "MIN");
+ break;
+ case 0x0f:
+ i915_decode_alu2(ctx, i, instr_prefix, "MAX");
+ break;
+ case 0x10:
+ i915_decode_alu1(ctx, i, instr_prefix, "FLR");
+ break;
+ case 0x11:
+ i915_decode_alu1(ctx, i, instr_prefix, "MOD");
+ break;
+ case 0x12:
+ i915_decode_alu1(ctx, i, instr_prefix, "TRC");
+ break;
+ case 0x13:
+ i915_decode_alu2(ctx, i, instr_prefix, "SGE");
+ break;
+ case 0x14:
+ i915_decode_alu2(ctx, i, instr_prefix, "SLT");
+ break;
+ case 0x15:
+ i915_decode_tex(ctx, i, instr_prefix, "TEXLD");
+ break;
+ case 0x16:
+ i915_decode_tex(ctx, i, instr_prefix, "TEXLDP");
+ break;
+ case 0x17:
+ i915_decode_tex(ctx, i, instr_prefix, "TEXLDB");
+ break;
+ case 0x19:
+ i915_decode_dcl(ctx, i, instr_prefix);
+ break;
+ default:
+ instr_out(ctx, i++, "%s: unknown\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ instr_out(ctx, i++, "%s\n", instr_prefix);
+ break;
+ }
+}
+
+static const char *
+decode_compare_func(uint32_t op)
+{
+ switch (op & 0x7) {
+ case 0:
+ return "always";
+ case 1:
+ return "never";
+ case 2:
+ return "less";
+ case 3:
+ return "equal";
+ case 4:
+ return "lequal";
+ case 5:
+ return "greater";
+ case 6:
+ return "notequal";
+ case 7:
+ return "gequal";
+ }
+ return "";
+}
+
+static const char *
+decode_stencil_op(uint32_t op)
+{
+ switch (op & 0x7) {
+ case 0:
+ return "keep";
+ case 1:
+ return "zero";
+ case 2:
+ return "replace";
+ case 3:
+ return "incr_sat";
+ case 4:
+ return "decr_sat";
+ case 5:
+ return "greater";
+ case 6:
+ return "incr";
+ case 7:
+ return "decr";
+ }
+ return "";
+}
+
+#if 0
+static const char *
+decode_logic_op(uint32_t op)
+{
+ switch (op & 0xf) {
+ case 0:
+ return "clear";
+ case 1:
+ return "nor";
+ case 2:
+ return "and_inv";
+ case 3:
+ return "copy_inv";
+ case 4:
+ return "and_rvrse";
+ case 5:
+ return "inv";
+ case 6:
+ return "xor";
+ case 7:
+ return "nand";
+ case 8:
+ return "and";
+ case 9:
+ return "equiv";
+ case 10:
+ return "noop";
+ case 11:
+ return "or_inv";
+ case 12:
+ return "copy";
+ case 13:
+ return "or_rvrse";
+ case 14:
+ return "or";
+ case 15:
+ return "set";
+ }
+ return "";
+}
+#endif
+
+static const char *
+decode_blend_fact(uint32_t op)
+{
+ switch (op & 0xf) {
+ case 1:
+ return "zero";
+ case 2:
+ return "one";
+ case 3:
+ return "src_colr";
+ case 4:
+ return "inv_src_colr";
+ case 5:
+ return "src_alpha";
+ case 6:
+ return "inv_src_alpha";
+ case 7:
+ return "dst_alpha";
+ case 8:
+ return "inv_dst_alpha";
+ case 9:
+ return "dst_colr";
+ case 10:
+ return "inv_dst_colr";
+ case 11:
+ return "src_alpha_sat";
+ case 12:
+ return "cnst_colr";
+ case 13:
+ return "inv_cnst_colr";
+ case 14:
+ return "cnst_alpha";
+ case 15:
+ return "inv_const_alpha";
+ }
+ return "";
+}
+
+static const char *
+decode_tex_coord_mode(uint32_t mode)
+{
+ switch (mode & 0x7) {
+ case 0:
+ return "wrap";
+ case 1:
+ return "mirror";
+ case 2:
+ return "clamp_edge";
+ case 3:
+ return "cube";
+ case 4:
+ return "clamp_border";
+ case 5:
+ return "mirror_once";
+ }
+ return "";
+}
+
+static const char *
+decode_sample_filter(uint32_t mode)
+{
+ switch (mode & 0x7) {
+ case 0:
+ return "nearest";
+ case 1:
+ return "linear";
+ case 2:
+ return "anisotropic";
+ case 3:
+ return "4x4_1";
+ case 4:
+ return "4x4_2";
+ case 5:
+ return "4x4_flat";
+ case 6:
+ return "6x5_mono";
+ }
+ return "";
+}
+
+static int
+decode_3d_1d(struct drm_intel_decode *ctx)
+{
+ unsigned int len, i, c, idx, word, map, sampler, instr;
+ const char *format, *zformat, *type;
+ uint32_t opcode;
+ uint32_t *data = ctx->data;
+ uint32_t devid = ctx->devid;
+
+ struct {
+ uint32_t opcode;
+ int i830_only;
+ unsigned int min_len;
+ unsigned int max_len;
+ const char *name;
+ } opcodes_3d_1d[] = {
+ { 0x86, 0, 4, 4, "3DSTATE_CHROMA_KEY" },
+ { 0x88, 0, 2, 2, "3DSTATE_CONSTANT_BLEND_COLOR" },
+ { 0x99, 0, 2, 2, "3DSTATE_DEFAULT_DIFFUSE" },
+ { 0x9a, 0, 2, 2, "3DSTATE_DEFAULT_SPECULAR" },
+ { 0x98, 0, 2, 2, "3DSTATE_DEFAULT_Z" },
+ { 0x97, 0, 2, 2, "3DSTATE_DEPTH_OFFSET_SCALE" },
+ { 0x9d, 0, 65, 65, "3DSTATE_FILTER_COEFFICIENTS_4X4" },
+ { 0x9e, 0, 4, 4, "3DSTATE_MONO_FILTER" },
+ { 0x89, 0, 4, 4, "3DSTATE_FOG_MODE" },
+ { 0x8f, 0, 2, 16, "3DSTATE_MAP_PALLETE_LOAD_32" },
+ { 0x83, 0, 2, 2, "3DSTATE_SPAN_STIPPLE" },
+ { 0x8c, 1, 2, 2, "3DSTATE_MAP_COORD_TRANSFORM_I830" },
+ { 0x8b, 1, 2, 2, "3DSTATE_MAP_VERTEX_TRANSFORM_I830" },
+ { 0x8d, 1, 3, 3, "3DSTATE_W_STATE_I830" },
+ { 0x01, 1, 2, 2, "3DSTATE_COLOR_FACTOR_I830" },
+ { 0x02, 1, 2, 2, "3DSTATE_MAP_COORD_SETBIND_I830"},
+ }, *opcode_3d_1d;
+
+ opcode = (data[0] & 0x00ff0000) >> 16;
+
+ switch (opcode) {
+ case 0x07:
+ /* This instruction is unusual: a length of 0 means just
+ * 1 DWORD instead of 2. One part of the documentation
+ * calls a 0 length unsupported while another requires it,
+ * and in practice 0-length LOAD_INDIRECTs appear to
+ * cause no harm.
+ */
+ instr_out(ctx, 0, "3DSTATE_LOAD_INDIRECT\n");
+ len = (data[0] & 0x000000ff) + 1;
+ i = 1;
+ if (data[0] & (0x01 << 8)) {
+ instr_out(ctx, i++, "SIS.0\n");
+ instr_out(ctx, i++, "SIS.1\n");
+ }
+ if (data[0] & (0x02 << 8)) {
+ instr_out(ctx, i++, "DIS.0\n");
+ }
+ if (data[0] & (0x04 << 8)) {
+ instr_out(ctx, i++, "SSB.0\n");
+ instr_out(ctx, i++, "SSB.1\n");
+ }
+ if (data[0] & (0x08 << 8)) {
+ instr_out(ctx, i++, "MSB.0\n");
+ instr_out(ctx, i++, "MSB.1\n");
+ }
+ if (data[0] & (0x10 << 8)) {
+ instr_out(ctx, i++, "PSP.0\n");
+ instr_out(ctx, i++, "PSP.1\n");
+ }
+ if (data[0] & (0x20 << 8)) {
+ instr_out(ctx, i++, "PSC.0\n");
+ instr_out(ctx, i++, "PSC.1\n");
+ }
+ if (len != i) {
+ fprintf(out, "Bad count in 3DSTATE_LOAD_INDIRECT\n");
+ return len;
+ }
+ return len;
+ case 0x04:
+ instr_out(ctx, 0,
+ "3DSTATE_LOAD_STATE_IMMEDIATE_1\n");
+ len = (data[0] & 0x0000000f) + 2;
+ i = 1;
+ for (word = 0; word <= 8; word++) {
+ if (data[0] & (1 << (4 + word))) {
+ /* save vertex state for decode */
+ if (!IS_GEN2(devid)) {
+ int tex_num;
+
+ if (word == 2) {
+ saved_s2_set = 1;
+ saved_s2 = data[i];
+ }
+ if (word == 4) {
+ saved_s4_set = 1;
+ saved_s4 = data[i];
+ }
+
+ switch (word) {
+ case 0:
+ instr_out(ctx, i,
+ "S0: vbo offset: 0x%08x%s\n",
+ data[i] & (~1),
+ data[i] & 1 ?
+ ", auto cache invalidate disabled"
+ : "");
+ break;
+ case 1:
+ instr_out(ctx, i,
+ "S1: vertex width: %i, vertex pitch: %i\n",
+ (data[i] >> 24) & 0x3f,
+ (data[i] >> 16) & 0x3f);
+ break;
+ case 2:
+ instr_out(ctx, i,
+ "S2: texcoord formats: ");
+ for (tex_num = 0; tex_num < 8; tex_num++) {
+ switch ((data[i] >> tex_num * 4) & 0xf) {
+ case 0:
+ fprintf(out,
+ "%i=2D ",
+ tex_num);
+ break;
+ case 1:
+ fprintf(out,
+ "%i=3D ",
+ tex_num);
+ break;
+ case 2:
+ fprintf(out,
+ "%i=4D ",
+ tex_num);
+ break;
+ case 3:
+ fprintf(out,
+ "%i=1D ",
+ tex_num);
+ break;
+ case 4:
+ fprintf(out,
+ "%i=2D_16 ",
+ tex_num);
+ break;
+ case 5:
+ fprintf(out,
+ "%i=4D_16 ",
+ tex_num);
+ break;
+ case 0xf:
+ fprintf(out,
+ "%i=NP ",
+ tex_num);
+ break;
+ }
+ }
+ fprintf(out, "\n");
+
+ break;
+ case 3:
+ instr_out(ctx, i,
+ "S3: not documented\n");
+ break;
+ case 4:
+ {
+ const char *cullmode = "";
+ const char *vfmt_xyzw = "";
+ switch ((data[i] >> 13) & 0x3) {
+ case 0:
+ cullmode = "both";
+ break;
+ case 1:
+ cullmode = "none";
+ break;
+ case 2:
+ cullmode = "cw";
+ break;
+ case 3:
+ cullmode = "ccw";
+ break;
+ }
+ switch (data[i] & (7 << 6 | 1 << 2)) {
+ case 1 << 6:
+ vfmt_xyzw = "XYZ,";
+ break;
+ case 2 << 6:
+ vfmt_xyzw = "XYZW,";
+ break;
+ case 3 << 6:
+ vfmt_xyzw = "XY,";
+ break;
+ case 4 << 6:
+ vfmt_xyzw = "XYW,";
+ break;
+ case 1 << 6 | 1 << 2:
+ vfmt_xyzw = "XYZF,";
+ break;
+ case 2 << 6 | 1 << 2:
+ vfmt_xyzw = "XYZWF,";
+ break;
+ case 3 << 6 | 1 << 2:
+ vfmt_xyzw = "XYF,";
+ break;
+ case 4 << 6 | 1 << 2:
+ vfmt_xyzw = "XYWF,";
+ break;
+ }
+ instr_out(ctx, i,
+ "S4: point_width=%i, line_width=%.1f,"
+ "%s%s%s%s%s cullmode=%s, vfmt=%s%s%s%s%s%s "
+ "%s%s%s%s%s\n",
+ (data[i] >> 23) & 0x1ff,
+ ((data[i] >> 19) & 0xf) / 2.0,
+ data[i] & (0xf << 15) ? " flatshade=" : "",
+ data[i] & (1 << 18) ? "Alpha," : "",
+ data[i] & (1 << 17) ? "Fog," : "",
+ data[i] & (1 << 16) ? "Specular," : "",
+ data[i] & (1 << 15) ? "Color," : "",
+ cullmode,
+ data[i] & (1 << 12) ? "PointWidth," : "",
+ data[i] & (1 << 11) ? "SpecFog," : "",
+ data[i] & (1 << 10) ? "Color," : "",
+ data[i] & (1 << 9) ? "DepthOfs," : "",
+ vfmt_xyzw,
+ data[i] & (1 << 9) ? "FogParam," : "",
+ data[i] & (1 << 5) ? "force default diffuse, " : "",
+ data[i] & (1 << 4) ? "force default specular, " : "",
+ data[i] & (1 << 3) ? "local depth ofs enable, " : "",
+ data[i] & (1 << 1) ? "point sprite enable, " : "",
+ data[i] & (1 << 0) ? "line AA enable, " : "");
+ break;
+ }
+ case 5:
+ {
+ instr_out(ctx, i,
+ "S5:%s%s%s%s%s"
+ "%s%s%s%s stencil_ref=0x%x, stencil_test=%s, "
+ "stencil_fail=%s, stencil_pass_z_fail=%s, "
+ "stencil_pass_z_pass=%s, %s%s%s%s\n",
+ data[i] & (0xf << 28) ? " write_disable=" : "",
+ data[i] & (1 << 31) ? "Alpha," : "",
+ data[i] & (1 << 30) ? "Red," : "",
+ data[i] & (1 << 29) ? "Green," : "",
+ data[i] & (1 << 28) ? "Blue," : "",
+ data[i] & (1 << 27) ? " force default point size," : "",
+ data[i] & (1 << 26) ? " last pixel enable," : "",
+ data[i] & (1 << 25) ? " global depth ofs enable," : "",
+ data[i] & (1 << 24) ? " fog enable," : "",
+ (data[i] >> 16) & 0xff,
+ decode_compare_func(data[i] >> 13),
+ decode_stencil_op(data[i] >> 10),
+ decode_stencil_op(data[i] >> 7),
+ decode_stencil_op(data[i] >> 4),
+ data[i] & (1 << 3) ? "stencil write enable, " : "",
+ data[i] & (1 << 2) ? "stencil test enable, " : "",
+ data[i] & (1 << 1) ? "color dither enable, " : "",
+ data[i] & (1 << 0) ? "logicop enable, " : "");
+ }
+ break;
+ case 6:
+ instr_out(ctx, i,
+ "S6: %salpha_test=%s, alpha_ref=0x%x, "
+ "depth_test=%s, %ssrc_blnd_fct=%s, dst_blnd_fct=%s, "
+ "%s%stristrip_provoking_vertex=%i\n",
+ data[i] & (1 << 31) ? "alpha test enable, " : "",
+ decode_compare_func(data[i] >> 28),
+ data[i] & (0xff << 20),
+ decode_compare_func(data[i] >> 16),
+ data[i] & (1 << 15) ? "cbuf blend enable, " : "",
+ decode_blend_fact(data[i] >> 8),
+ decode_blend_fact(data[i] >> 4),
+ data[i] & (1 << 3) ? "depth write enable, " : "",
+ data[i] & (1 << 2) ? "cbuf write enable, " : "",
+ data[i] & (0x3));
+ break;
+ case 7:
+ instr_out(ctx, i,
+ "S7: depth offset constant: 0x%08x\n",
+ data[i]);
+ break;
+ }
+ } else {
+ instr_out(ctx, i,
+ "S%d: 0x%08x\n", word, data[i]);
+ }
+ i++;
+ }
+ }
+ if (len != i) {
+ fprintf(out,
+ "Bad count in 3DSTATE_LOAD_STATE_IMMEDIATE_1\n");
+ }
+ return len;
+ case 0x03:
+ instr_out(ctx, 0,
+ "3DSTATE_LOAD_STATE_IMMEDIATE_2\n");
+ len = (data[0] & 0x0000000f) + 2;
+ i = 1;
+ for (word = 6; word <= 14; word++) {
+ if (data[0] & (1 << word)) {
+ if (word == 6)
+ instr_out(ctx, i++,
+ "TBCF\n");
+ else if (word >= 7 && word <= 10) {
+ instr_out(ctx, i++,
+ "TB%dC\n", word - 7);
+ instr_out(ctx, i++,
+ "TB%dA\n", word - 7);
+ } else if (word >= 11 && word <= 14) {
+ instr_out(ctx, i,
+ "TM%dS0: offset=0x%08x, %s\n",
+ word - 11,
+ data[i] & 0xfffffffe,
+ data[i] & 1 ? "use fence" :
+ "");
+ i++;
+ instr_out(ctx, i,
+ "TM%dS1: height=%i, width=%i, %s\n",
+ word - 11, data[i] >> 21,
+ (data[i] >> 10) & 0x3ff,
+ data[i] & 2 ? (data[i] & 1 ?
+ "y-tiled" :
+ "x-tiled") :
+ "");
+ i++;
+ instr_out(ctx, i,
+ "TM%dS2: pitch=%i, \n",
+ word - 11,
+ ((data[i] >> 21) + 1) * 4);
+ i++;
+ instr_out(ctx, i++,
+ "TM%dS3\n", word - 11);
+ instr_out(ctx, i++,
+ "TM%dS4: dflt color\n",
+ word - 11);
+ }
+ }
+ }
+ if (len != i) {
+ fprintf(out,
+ "Bad count in 3DSTATE_LOAD_STATE_IMMEDIATE_2\n");
+ }
+ return len;
+ case 0x00:
+ instr_out(ctx, 0, "3DSTATE_MAP_STATE\n");
+ len = (data[0] & 0x0000003f) + 2;
+ instr_out(ctx, 1, "mask\n");
+
+ i = 2;
+ for (map = 0; map <= 15; map++) {
+ if (data[1] & (1 << map)) {
+ int width, height, pitch, dword;
+ const char *tiling;
+
+ dword = data[i];
+ instr_out(ctx, i++,
+ "map %d MS2 %s%s%s\n", map,
+ dword & (1 << 31) ?
+ "untrusted surface, " : "",
+ dword & (1 << 1) ?
+ "vertical line stride enable, " : "",
+ dword & (1 << 0) ?
+ "vertical ofs enable, " : "");
+
+ dword = data[i];
+ width = ((dword >> 10) & ((1 << 11) - 1)) + 1;
+ height = ((dword >> 21) & ((1 << 11) - 1)) + 1;
+
+ tiling = "none";
+ if (dword & (1 << 2))
+ tiling = "fenced";
+ else if (dword & (1 << 1))
+ tiling = dword & (1 << 0) ? "Y" : "X";
+ type = " BAD";
+ format = "BAD";
+ switch ((dword >> 7) & 0x7) {
+ case 1:
+ type = "8b";
+ switch ((dword >> 3) & 0xf) {
+ case 0:
+ format = "I";
+ break;
+ case 1:
+ format = "L";
+ break;
+ case 4:
+ format = "A";
+ break;
+ case 5:
+ format = " mono";
+ break;
+ }
+ break;
+ case 2:
+ type = "16b";
+ switch ((dword >> 3) & 0xf) {
+ case 0:
+ format = " rgb565";
+ break;
+ case 1:
+ format = " argb1555";
+ break;
+ case 2:
+ format = " argb4444";
+ break;
+ case 5:
+ format = " ay88";
+ break;
+ case 6:
+ format = " bump655";
+ break;
+ case 7:
+ format = "I";
+ break;
+ case 8:
+ format = "L";
+ break;
+ case 9:
+ format = "A";
+ break;
+ }
+ break;
+ case 3:
+ type = "32b";
+ switch ((dword >> 3) & 0xf) {
+ case 0:
+ format = " argb8888";
+ break;
+ case 1:
+ format = " abgr8888";
+ break;
+ case 2:
+ format = " xrgb8888";
+ break;
+ case 3:
+ format = " xbgr8888";
+ break;
+ case 4:
+ format = " qwvu8888";
+ break;
+ case 5:
+ format = " axvu8888";
+ break;
+ case 6:
+ format = " lxvu8888";
+ break;
+ case 7:
+ format = " xlvu8888";
+ break;
+ case 8:
+ format = " argb2101010";
+ break;
+ case 9:
+ format = " abgr2101010";
+ break;
+ case 10:
+ format = " awvu2101010";
+ break;
+ case 11:
+ format = " gr1616";
+ break;
+ case 12:
+ format = " vu1616";
+ break;
+ case 13:
+ format = " xI824";
+ break;
+ case 14:
+ format = " xA824";
+ break;
+ case 15:
+ format = " xL824";
+ break;
+ }
+ break;
+ case 5:
+ type = "422";
+ switch ((dword >> 3) & 0xf) {
+ case 0:
+ format = " yuv_swapy";
+ break;
+ case 1:
+ format = " yuv";
+ break;
+ case 2:
+ format = " yuv_swapuv";
+ break;
+ case 3:
+ format = " yuv_swapuvy";
+ break;
+ }
+ break;
+ case 6:
+ type = "compressed";
+ switch ((dword >> 3) & 0x7) {
+ case 0:
+ format = " dxt1";
+ break;
+ case 1:
+ format = " dxt2_3";
+ break;
+ case 2:
+ format = " dxt4_5";
+ break;
+ case 3:
+ format = " fxt1";
+ break;
+ case 4:
+ format = " dxt1_rb";
+ break;
+ }
+ break;
+ case 7:
+ type = "4b indexed";
+ switch ((dword >> 3) & 0xf) {
+ case 7:
+ format = " argb8888";
+ break;
+ }
+ break;
+ }
+ dword = data[i];
+ instr_out(ctx, i++,
+ "map %d MS3 [width=%d, height=%d, format=%s%s, tiling=%s%s]\n",
+ map, width, height, type, format,
+ tiling,
+ dword & (1 << 9) ? " palette select" :
+ "");
+
+ dword = data[i];
+ pitch =
+ 4 * (((dword >> 21) & ((1 << 11) - 1)) + 1);
+ instr_out(ctx, i++,
+ "map %d MS4 [pitch=%d, max_lod=%i, vol_depth=%i, cube_face_ena=%x, %s]\n",
+ map, pitch, (dword >> 9) & 0x3f,
+ dword & 0xff, (dword >> 15) & 0x3f,
+ dword & (1 << 8) ? "miplayout legacy"
+ : "miplayout right");
+ }
+ }
+ if (len != i) {
+ fprintf(out, "Bad count in 3DSTATE_MAP_STATE\n");
+ return len;
+ }
+ return len;
+ case 0x06:
+ instr_out(ctx, 0,
+ "3DSTATE_PIXEL_SHADER_CONSTANTS\n");
+ len = (data[0] & 0x000000ff) + 2;
+
+ i = 2;
+ for (c = 0; c <= 31; c++) {
+ if (data[1] & (1 << c)) {
+ instr_out(ctx, i, "C%d.X = %f\n", c,
+ int_as_float(data[i]));
+ i++;
+ instr_out(ctx, i, "C%d.Y = %f\n",
+ c, int_as_float(data[i]));
+ i++;
+ instr_out(ctx, i, "C%d.Z = %f\n",
+ c, int_as_float(data[i]));
+ i++;
+ instr_out(ctx, i, "C%d.W = %f\n",
+ c, int_as_float(data[i]));
+ i++;
+ }
+ }
+ if (len != i) {
+ fprintf(out,
+ "Bad count in 3DSTATE_PIXEL_SHADER_CONSTANTS\n");
+ }
+ return len;
+ case 0x05:
+ instr_out(ctx, 0, "3DSTATE_PIXEL_SHADER_PROGRAM\n");
+ len = (data[0] & 0x000000ff) + 2;
+ if ((len - 1) % 3 != 0 || len > 370) {
+ fprintf(out,
+ "Bad count in 3DSTATE_PIXEL_SHADER_PROGRAM\n");
+ }
+ i = 1;
+ for (instr = 0; instr < (len - 1) / 3; instr++) {
+ char instr_prefix[10];
+
+ sprintf(instr_prefix, "PS%03d", instr);
+ i915_decode_instruction(ctx, i,
+ instr_prefix);
+ i += 3;
+ }
+ return len;
+ case 0x01:
+ if (IS_GEN2(devid))
+ break;
+ instr_out(ctx, 0, "3DSTATE_SAMPLER_STATE\n");
+ instr_out(ctx, 1, "mask\n");
+ len = (data[0] & 0x0000003f) + 2;
+ i = 2;
+ for (sampler = 0; sampler <= 15; sampler++) {
+ if (data[1] & (1 << sampler)) {
+ uint32_t dword;
+ const char *mip_filter = "";
+
+ dword = data[i];
+ switch ((dword >> 20) & 0x3) {
+ case 0:
+ mip_filter = "none";
+ break;
+ case 1:
+ mip_filter = "nearest";
+ break;
+ case 3:
+ mip_filter = "linear";
+ break;
+ }
+ instr_out(ctx, i++,
+ "sampler %d SS2:%s%s%s "
+ "base_mip_level=%i, mip_filter=%s, mag_filter=%s, min_filter=%s "
+ "lod_bias=%.2f,%s max_aniso=%i, shadow_func=%s\n",
+ sampler,
+ dword & (1 << 31) ? " reverse gamma,"
+ : "",
+ dword & (1 << 30) ? " packed2planar,"
+ : "",
+ dword & (1 << 29) ?
+ " colorspace conversion," : "",
+ (dword >> 22) & 0x1f, mip_filter,
+ decode_sample_filter(dword >> 17),
+ decode_sample_filter(dword >> 14),
+ ((dword >> 5) & 0x1ff) / (0x10 * 1.0),
+ dword & (1 << 4) ? " shadow," : "",
+ dword & (1 << 3) ? 4 : 2,
+ decode_compare_func(dword));
+ dword = data[i];
+ instr_out(ctx, i++,
+ "sampler %d SS3: min_lod=%.2f,%s "
+ "tcmode_x=%s, tcmode_y=%s, tcmode_z=%s,%s texmap_idx=%i,%s\n",
+ sampler,
+ ((dword >> 24) & 0xff) / (0x10 * 1.0),
+ dword & (1 << 17) ?
+ " kill pixel enable," : "",
+ decode_tex_coord_mode(dword >> 12),
+ decode_tex_coord_mode(dword >> 9),
+ decode_tex_coord_mode(dword >> 6),
+ dword & (1 << 5) ?
+ " normalized coords," : "",
+ (dword >> 1) & 0xf,
+ dword & (1 << 0) ? " deinterlacer," :
+ "");
+ dword = data[i];
+ instr_out(ctx, i++,
+ "sampler %d SS4: border color\n",
+ sampler);
+ }
+ }
+ if (len != i) {
+ fprintf(out, "Bad count in 3DSTATE_SAMPLER_STATE\n");
+ }
+ return len;
+ case 0x85:
+ len = (data[0] & 0x0000000f) + 2;
+
+ if (len != 2)
+ fprintf(out,
+ "Bad count in 3DSTATE_DEST_BUFFER_VARIABLES\n");
+
+ instr_out(ctx, 0,
+ "3DSTATE_DEST_BUFFER_VARIABLES\n");
+
+ switch ((data[1] >> 8) & 0xf) {
+ case 0x0:
+ format = "g8";
+ break;
+ case 0x1:
+ format = "x1r5g5b5";
+ break;
+ case 0x2:
+ format = "r5g6b5";
+ break;
+ case 0x3:
+ format = "a8r8g8b8";
+ break;
+ case 0x4:
+ format = "ycrcb_swapy";
+ break;
+ case 0x5:
+ format = "ycrcb_normal";
+ break;
+ case 0x6:
+ format = "ycrcb_swapuv";
+ break;
+ case 0x7:
+ format = "ycrcb_swapuvy";
+ break;
+ case 0x8:
+ format = "a4r4g4b4";
+ break;
+ case 0x9:
+ format = "a1r5g5b5";
+ break;
+ case 0xa:
+ format = "a2r10g10b10";
+ break;
+ default:
+ format = "BAD";
+ break;
+ }
+ switch ((data[1] >> 2) & 0x3) {
+ case 0x0:
+ zformat = "u16";
+ break;
+ case 0x1:
+ zformat = "f16";
+ break;
+ case 0x2:
+ zformat = "u24x8";
+ break;
+ default:
+ zformat = "BAD";
+ break;
+ }
+ instr_out(ctx, 1,
+ "%s format, %s depth format, early Z %sabled\n",
+ format, zformat,
+ (data[1] & (1 << 31)) ? "en" : "dis");
+ return len;
+
+ case 0x8e:
+ {
+ const char *name, *tiling;
+
+ len = (data[0] & 0x0000000f) + 2;
+ if (len != 3)
+ fprintf(out,
+ "Bad count in 3DSTATE_BUFFER_INFO\n");
+
+ switch ((data[1] >> 24) & 0x7) {
+ case 0x3:
+ name = "color";
+ break;
+ case 0x7:
+ name = "depth";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+
+ tiling = "none";
+ if (data[1] & (1 << 23))
+ tiling = "fenced";
+ else if (data[1] & (1 << 22))
+ tiling = data[1] & (1 << 21) ? "Y" : "X";
+
+ instr_out(ctx, 0, "3DSTATE_BUFFER_INFO\n");
+ instr_out(ctx, 1,
+ "%s, tiling = %s, pitch=%d\n", name, tiling,
+ data[1] & 0xffff);
+
+ instr_out(ctx, 2, "address\n");
+ return len;
+ }
+ case 0x81:
+ len = (data[0] & 0x0000000f) + 2;
+
+ if (len != 3)
+ fprintf(out,
+ "Bad count in 3DSTATE_SCISSOR_RECTANGLE\n");
+
+ instr_out(ctx, 0, "3DSTATE_SCISSOR_RECTANGLE\n");
+ instr_out(ctx, 1, "(%d,%d)\n",
+ data[1] & 0xffff, data[1] >> 16);
+ instr_out(ctx, 2, "(%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+
+ return len;
+ case 0x80:
+ len = (data[0] & 0x0000000f) + 2;
+
+ if (len != 5)
+ fprintf(out,
+ "Bad count in 3DSTATE_DRAWING_RECTANGLE\n");
+
+ instr_out(ctx, 0, "3DSTATE_DRAWING_RECTANGLE\n");
+ instr_out(ctx, 1, "%s\n",
+ data[1] & (1 << 30) ? "depth ofs disabled " : "");
+ instr_out(ctx, 2, "(%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ instr_out(ctx, 3, "(%d,%d)\n",
+ data[3] & 0xffff, data[3] >> 16);
+ instr_out(ctx, 4, "(%d,%d)\n",
+ data[4] & 0xffff, data[4] >> 16);
+
+ return len;
+ case 0x9c:
+ len = (data[0] & 0x0000000f) + 2;
+
+ if (len != 7)
+ fprintf(out, "Bad count in 3DSTATE_CLEAR_PARAMETERS\n");
+
+ instr_out(ctx, 0, "3DSTATE_CLEAR_PARAMETERS\n");
+ instr_out(ctx, 1, "prim_type=%s, clear=%s%s%s\n",
+ data[1] & (1 << 16) ? "CLEAR_RECT" : "ZONE_INIT",
+ data[1] & (1 << 2) ? "color," : "",
+ data[1] & (1 << 1) ? "depth," : "",
+ data[1] & (1 << 0) ? "stencil," : "");
+ instr_out(ctx, 2, "clear color\n");
+ instr_out(ctx, 3, "clear depth/stencil\n");
+ instr_out(ctx, 4, "color value (rgba8888)\n");
+ instr_out(ctx, 5, "depth value %f\n",
+ int_as_float(data[5]));
+ instr_out(ctx, 6, "clear stencil\n");
+ return len;
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(opcodes_3d_1d); idx++) {
+ opcode_3d_1d = &opcodes_3d_1d[idx];
+ if (opcode_3d_1d->i830_only && !IS_GEN2(devid))
+ continue;
+
+ if (((data[0] & 0x00ff0000) >> 16) == opcode_3d_1d->opcode) {
+ len = 1;
+
+ instr_out(ctx, 0, "%s\n",
+ opcode_3d_1d->name);
+ if (opcode_3d_1d->max_len > 1) {
+ len = (data[0] & 0x0000ffff) + 2;
+ if (len < opcode_3d_1d->min_len ||
+ len > opcode_3d_1d->max_len) {
+ fprintf(out, "Bad count in %s\n",
+ opcode_3d_1d->name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ instr_out(ctx, i, "dword %d\n", i);
+ }
+
+ return len;
+ }
+ }
+
+ instr_out(ctx, 0, "3D UNKNOWN: 3d_1d opcode = 0x%x\n",
+ opcode);
+ return 1;
+}
+
+static int
+decode_3d_primitive(struct drm_intel_decode *ctx)
+{
+ uint32_t *data = ctx->data;
+ uint32_t count = ctx->count;
+ char immediate = (data[0] & (1 << 23)) == 0;
+ unsigned int len, i, j, ret;
+ const char *primtype;
+ int original_s2 = saved_s2;
+ int original_s4 = saved_s4;
+
+ switch ((data[0] >> 18) & 0xf) {
+ case 0x0:
+ primtype = "TRILIST";
+ break;
+ case 0x1:
+ primtype = "TRISTRIP";
+ break;
+ case 0x2:
+ primtype = "TRISTRIP_REVERSE";
+ break;
+ case 0x3:
+ primtype = "TRIFAN";
+ break;
+ case 0x4:
+ primtype = "POLYGON";
+ break;
+ case 0x5:
+ primtype = "LINELIST";
+ break;
+ case 0x6:
+ primtype = "LINESTRIP";
+ break;
+ case 0x7:
+ primtype = "RECTLIST";
+ break;
+ case 0x8:
+ primtype = "POINTLIST";
+ break;
+ case 0x9:
+ primtype = "DIB";
+ break;
+ case 0xa:
+ primtype = "CLEAR_RECT";
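+		/* CLEAR_RECT inline vertices carry only X/Y, so force the
+		 * saved S4 position mask to XY-only and mark every S2
+		 * texcoord set as absent while decoding this packet.
+		 */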
+ saved_s4 = 3 << 6;
+ saved_s2 = ~0;
+ break;
+ default:
+ primtype = "unknown";
+ break;
+ }
+
+ /* XXX: 3DPRIM_DIB not supported */
+ if (immediate) {
+ len = (data[0] & 0x0003ffff) + 2;
+ instr_out(ctx, 0, "3DPRIMITIVE inline %s\n",
+ primtype);
+ if (count < len)
+ BUFFER_FAIL(count, len, "3DPRIMITIVE inline");
+ if (!saved_s2_set || !saved_s4_set) {
+ fprintf(out, "unknown vertex format\n");
+ for (i = 1; i < len; i++) {
+ instr_out(ctx, i,
+ " vertex data (%f float)\n",
+ int_as_float(data[i]));
+ }
+ } else {
+ unsigned int vertex = 0;
+ for (i = 1; i < len;) {
+ unsigned int tc;
+
+#define VERTEX_OUT(fmt, ...) do { \
+ if (i < len) \
+ instr_out(ctx, i, " V%d."fmt"\n", vertex, __VA_ARGS__); \
+ else \
+ fprintf(out, " missing data in V%d\n", vertex); \
+ i++; \
+} while (0)
+
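+			/* Decode one inline vertex: X/Y, then Z/W per the
+			 * saved S4 position mask, optional color/specular/
+			 * point-width words, and one set of texcoord words
+			 * per S2 nibble that is not 0xf.
+			 */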
+ VERTEX_OUT("X = %f", int_as_float(data[i]));
+ VERTEX_OUT("Y = %f", int_as_float(data[i]));
+ switch (saved_s4 >> 6 & 0x7) {
+ case 0x1:
+ VERTEX_OUT("Z = %f",
+ int_as_float(data[i]));
+ break;
+ case 0x2:
+ VERTEX_OUT("Z = %f",
+ int_as_float(data[i]));
+ VERTEX_OUT("W = %f",
+ int_as_float(data[i]));
+ break;
+ case 0x3:
+ break;
+ case 0x4:
+ VERTEX_OUT("W = %f",
+ int_as_float(data[i]));
+ break;
+ default:
+ fprintf(out, "bad S4 position mask\n");
+ }
+
+ if (saved_s4 & (1 << 10)) {
+ VERTEX_OUT
+ ("color = (A=0x%02x, R=0x%02x, G=0x%02x, "
+ "B=0x%02x)", data[i] >> 24,
+ (data[i] >> 16) & 0xff,
+ (data[i] >> 8) & 0xff,
+ data[i] & 0xff);
+ }
+ if (saved_s4 & (1 << 11)) {
+ VERTEX_OUT
+ ("spec = (A=0x%02x, R=0x%02x, G=0x%02x, "
+ "B=0x%02x)", data[i] >> 24,
+ (data[i] >> 16) & 0xff,
+ (data[i] >> 8) & 0xff,
+ data[i] & 0xff);
+ }
+ if (saved_s4 & (1 << 12))
+ VERTEX_OUT("width = 0x%08x)", data[i]);
+
+ for (tc = 0; tc <= 7; tc++) {
+ switch ((saved_s2 >> (tc * 4)) & 0xf) {
+ case 0x0:
+ VERTEX_OUT("T%d.X = %f", tc,
+ int_as_float(data
+ [i]));
+ VERTEX_OUT("T%d.Y = %f", tc,
+ int_as_float(data
+ [i]));
+ break;
+ case 0x1:
+ VERTEX_OUT("T%d.X = %f", tc,
+ int_as_float(data
+ [i]));
+ VERTEX_OUT("T%d.Y = %f", tc,
+ int_as_float(data
+ [i]));
+ VERTEX_OUT("T%d.Z = %f", tc,
+ int_as_float(data
+ [i]));
+ break;
+ case 0x2:
+ VERTEX_OUT("T%d.X = %f", tc,
+ int_as_float(data
+ [i]));
+ VERTEX_OUT("T%d.Y = %f", tc,
+ int_as_float(data
+ [i]));
+ VERTEX_OUT("T%d.Z = %f", tc,
+ int_as_float(data
+ [i]));
+ VERTEX_OUT("T%d.W = %f", tc,
+ int_as_float(data
+ [i]));
+ break;
+ case 0x3:
+ VERTEX_OUT("T%d.X = %f", tc,
+ int_as_float(data
+ [i]));
+ break;
+ case 0x4:
+ VERTEX_OUT
+ ("T%d.XY = 0x%08x half-float",
+ tc, data[i]);
+ break;
+ case 0x5:
+ VERTEX_OUT
+ ("T%d.XY = 0x%08x half-float",
+ tc, data[i]);
+ VERTEX_OUT
+ ("T%d.ZW = 0x%08x half-float",
+ tc, data[i]);
+ break;
+ case 0xf:
+ break;
+ default:
+ fprintf(out,
+ "bad S2.T%d format\n",
+ tc);
+ }
+ }
+ vertex++;
+ }
+ }
+
+ ret = len;
+ } else {
+ /* indirect vertices */
+ len = data[0] & 0x0000ffff; /* index count */
+ if (data[0] & (1 << 17)) {
+ /* random vertex access */
+ if (count < (len + 1) / 2 + 1) {
+ BUFFER_FAIL(count, (len + 1) / 2 + 1,
+ "3DPRIMITIVE random indirect");
+ }
+ instr_out(ctx, 0,
+ "3DPRIMITIVE random indirect %s (%d)\n",
+ primtype, len);
+ if (len == 0) {
+ /* vertex indices continue until 0xffff is
+ * found
+ */
+ for (i = 1; i < count; i++) {
+ if ((data[i] & 0xffff) == 0xffff) {
+ instr_out(ctx, i,
+ " indices: (terminator)\n");
+ ret = i;
+ goto out;
+ } else if ((data[i] >> 16) == 0xffff) {
+ instr_out(ctx, i,
+ " indices: 0x%04x, (terminator)\n",
+ data[i] & 0xffff);
+ ret = i;
+ goto out;
+ } else {
+ instr_out(ctx, i,
+ " indices: 0x%04x, 0x%04x\n",
+ data[i] & 0xffff,
+ data[i] >> 16);
+ }
+ }
+ fprintf(out,
+ "3DPRIMITIVE: no terminator found in index buffer\n");
+ ret = count;
+ goto out;
+ } else {
+ /* fixed size vertex index buffer */
+ for (j = 1, i = 0; i < len; i += 2, j++) {
+					if (i == len - 1) {
+ instr_out(ctx, j,
+ " indices: 0x%04x\n",
+ data[j] & 0xffff);
+ } else {
+ instr_out(ctx, j,
+ " indices: 0x%04x, 0x%04x\n",
+ data[j] & 0xffff,
+ data[j] >> 16);
+ }
+ }
+ }
+ ret = (len + 1) / 2 + 1;
+ goto out;
+ } else {
+ /* sequential vertex access */
+ instr_out(ctx, 0,
+ "3DPRIMITIVE sequential indirect %s, %d starting from "
+ "%d\n", primtype, len, data[1] & 0xffff);
+ instr_out(ctx, 1, " start\n");
+ ret = 2;
+ goto out;
+ }
+ }
+
+out:
+ saved_s2 = original_s2;
+ saved_s4 = original_s4;
+ return ret;
+}
+
+static int
+decode_3d(struct drm_intel_decode *ctx)
+{
+ uint32_t opcode;
+ unsigned int idx;
+ uint32_t *data = ctx->data;
+
+ struct {
+ uint32_t opcode;
+ unsigned int min_len;
+ unsigned int max_len;
+ const char *name;
+ } opcodes_3d[] = {
+ { 0x06, 1, 1, "3DSTATE_ANTI_ALIASING" },
+ { 0x08, 1, 1, "3DSTATE_BACKFACE_STENCIL_OPS" },
+ { 0x09, 1, 1, "3DSTATE_BACKFACE_STENCIL_MASKS" },
+ { 0x16, 1, 1, "3DSTATE_COORD_SET_BINDINGS" },
+ { 0x15, 1, 1, "3DSTATE_FOG_COLOR" },
+ { 0x0b, 1, 1, "3DSTATE_INDEPENDENT_ALPHA_BLEND" },
+ { 0x0d, 1, 1, "3DSTATE_MODES_4" },
+ { 0x0c, 1, 1, "3DSTATE_MODES_5" },
+ { 0x07, 1, 1, "3DSTATE_RASTERIZATION_RULES"},
+ }, *opcode_3d;
+
+ opcode = (data[0] & 0x1f000000) >> 24;
+
+ switch (opcode) {
+ case 0x1f:
+ return decode_3d_primitive(ctx);
+ case 0x1d:
+ return decode_3d_1d(ctx);
+ case 0x1c:
+ return decode_3d_1c(ctx);
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(opcodes_3d); idx++) {
+ opcode_3d = &opcodes_3d[idx];
+ if (opcode == opcode_3d->opcode) {
+ unsigned int len = 1, i;
+
+ instr_out(ctx, 0, "%s\n", opcode_3d->name);
+ if (opcode_3d->max_len > 1) {
+ len = (data[0] & 0xff) + 2;
+ if (len < opcode_3d->min_len ||
+ len > opcode_3d->max_len) {
+ fprintf(out, "Bad count in %s\n",
+ opcode_3d->name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ instr_out(ctx, i, "dword %d\n", i);
+ }
+ return len;
+ }
+ }
+
+ instr_out(ctx, 0, "3D UNKNOWN: 3d opcode = 0x%x\n", opcode);
+ return 1;
+}
+
+static const char *get_965_surfacetype(unsigned int surfacetype)
+{
+ switch (surfacetype) {
+ case 0:
+ return "1D";
+ case 1:
+ return "2D";
+ case 2:
+ return "3D";
+ case 3:
+ return "CUBE";
+ case 4:
+ return "BUFFER";
+ case 7:
+ return "NULL";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *get_965_depthformat(unsigned int depthformat)
+{
+ switch (depthformat) {
+ case 0:
+ return "s8_z24float";
+ case 1:
+ return "z32float";
+ case 2:
+ return "z24s8";
+ case 5:
+ return "z16";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *get_965_element_component(uint32_t data, int component)
+{
+ uint32_t component_control = (data >> (16 + (3 - component) * 4)) & 0x7;
+
+ switch (component_control) {
+ case 0:
+ return "nostore";
+ case 1:
+ switch (component) {
+ case 0:
+ return "X";
+ case 1:
+ return "Y";
+ case 2:
+ return "Z";
+ case 3:
+ return "W";
+ default:
+ return "fail";
+ }
+ case 2:
+ return "0.0";
+ case 3:
+ return "1.0";
+ case 4:
+ return "0x1";
+ case 5:
+ return "VID";
+ default:
+ return "fail";
+ }
+}
+
+static const char *get_965_prim_type(uint32_t primtype)
+{
+ switch (primtype) {
+ case 0x01:
+ return "point list";
+ case 0x02:
+ return "line list";
+ case 0x03:
+ return "line strip";
+ case 0x04:
+ return "tri list";
+ case 0x05:
+ return "tri strip";
+ case 0x06:
+ return "tri fan";
+ case 0x07:
+ return "quad list";
+ case 0x08:
+ return "quad strip";
+ case 0x09:
+ return "line list adj";
+ case 0x0a:
+ return "line strip adj";
+ case 0x0b:
+ return "tri list adj";
+ case 0x0c:
+ return "tri strip adj";
+ case 0x0d:
+ return "tri strip reverse";
+ case 0x0e:
+ return "polygon";
+ case 0x0f:
+ return "rect list";
+ case 0x10:
+ return "line loop";
+ case 0x11:
+ return "point list bf";
+ case 0x12:
+ return "line strip cont";
+ case 0x13:
+ return "line strip bf";
+ case 0x14:
+ return "line strip cont bf";
+ case 0x15:
+ return "tri fan no stipple";
+ default:
+ return "fail";
+ }
+}
+
+static int
+i965_decode_urb_fence(struct drm_intel_decode *ctx, int len)
+{
+ uint32_t vs_fence, clip_fence, gs_fence, sf_fence, vfe_fence, cs_fence;
+ uint32_t *data = ctx->data;
+
+ if (len != 3)
+ fprintf(out, "Bad count in URB_FENCE\n");
+
+ vs_fence = data[1] & 0x3ff;
+ gs_fence = (data[1] >> 10) & 0x3ff;
+ clip_fence = (data[1] >> 20) & 0x3ff;
+ sf_fence = data[2] & 0x3ff;
+ vfe_fence = (data[2] >> 10) & 0x3ff;
+ cs_fence = (data[2] >> 20) & 0x7ff;
+
+ instr_out(ctx, 0, "URB_FENCE: %s%s%s%s%s%s\n",
+ (data[0] >> 13) & 1 ? "cs " : "",
+ (data[0] >> 12) & 1 ? "vfe " : "",
+ (data[0] >> 11) & 1 ? "sf " : "",
+ (data[0] >> 10) & 1 ? "clip " : "",
+ (data[0] >> 9) & 1 ? "gs " : "",
+ (data[0] >> 8) & 1 ? "vs " : "");
+ instr_out(ctx, 1,
+ "vs fence: %d, clip_fence: %d, gs_fence: %d\n",
+ vs_fence, clip_fence, gs_fence);
+ instr_out(ctx, 2,
+ "sf fence: %d, vfe_fence: %d, cs_fence: %d\n",
+ sf_fence, vfe_fence, cs_fence);
+ if (gs_fence < vs_fence)
+ fprintf(out, "gs fence < vs fence!\n");
+ if (clip_fence < gs_fence)
+ fprintf(out, "clip fence < gs fence!\n");
+ if (sf_fence < clip_fence)
+ fprintf(out, "sf fence < clip fence!\n");
+ if (cs_fence < sf_fence)
+ fprintf(out, "cs fence < sf fence!\n");
+
+ return len;
+}
+
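+/* Helpers for STATE_BASE_ADDRESS: bit 0 of each address DWord is its
+ * "modify enable" flag, and the base address or upper bound itself
+ * lives in the remaining high bits.
+ */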
+static void
+state_base_out(struct drm_intel_decode *ctx, unsigned int index,
+ const char *name)
+{
+ if (ctx->data[index] & 1) {
+ instr_out(ctx, index,
+ "%s state base address 0x%08x\n", name,
+ ctx->data[index] & ~1);
+ } else {
+ instr_out(ctx, index, "%s state base not updated\n",
+ name);
+ }
+}
+
+static void
+state_max_out(struct drm_intel_decode *ctx, unsigned int index,
+ const char *name)
+{
+ if (ctx->data[index] & 1) {
+ if (ctx->data[index] == 1) {
+ instr_out(ctx, index,
+ "%s state upper bound disabled\n", name);
+ } else {
+ instr_out(ctx, index,
+ "%s state upper bound 0x%08x\n", name,
+ ctx->data[index] & ~1);
+ }
+ } else {
+ instr_out(ctx, index,
+ "%s state upper bound not updated\n", name);
+ }
+}
+
+static int
+gen7_3DSTATE_VIEWPORT_STATE_POINTERS_CC(struct drm_intel_decode *ctx)
+{
+ instr_out(ctx, 0, "3DSTATE_VIEWPORT_STATE_POINTERS_CC\n");
+ instr_out(ctx, 1, "pointer to CC viewport\n");
+
+ return 2;
+}
+
+static int
+gen7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP(struct drm_intel_decode *ctx)
+{
+ instr_out(ctx, 0, "3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP\n");
+ instr_out(ctx, 1, "pointer to SF_CLIP viewport\n");
+
+ return 2;
+}
+
+static int
+gen7_3DSTATE_BLEND_STATE_POINTERS(struct drm_intel_decode *ctx)
+{
+ instr_out(ctx, 0, "3DSTATE_BLEND_STATE_POINTERS\n");
+ instr_out(ctx, 1, "pointer to BLEND_STATE at 0x%08x (%s)\n",
+ ctx->data[1] & ~1,
+ (ctx->data[1] & 1) ? "changed" : "unchanged");
+
+ return 2;
+}
+
+static int
+gen7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS(struct drm_intel_decode *ctx)
+{
+ instr_out(ctx, 0, "3DSTATE_DEPTH_STENCIL_STATE_POINTERS\n");
+ instr_out(ctx, 1,
+ "pointer to DEPTH_STENCIL_STATE at 0x%08x (%s)\n",
+ ctx->data[1] & ~1,
+ (ctx->data[1] & 1) ? "changed" : "unchanged");
+
+ return 2;
+}
+
+static int
+gen7_3DSTATE_HIER_DEPTH_BUFFER(struct drm_intel_decode *ctx)
+{
+ instr_out(ctx, 0, "3DSTATE_HIER_DEPTH_BUFFER\n");
+ instr_out(ctx, 1, "pitch %db\n",
+ (ctx->data[1] & 0x1ffff) + 1);
+ instr_out(ctx, 2, "pointer to HiZ buffer\n");
+
+ return 3;
+}
+
+static int
+gen6_3DSTATE_CC_STATE_POINTERS(struct drm_intel_decode *ctx)
+{
+ instr_out(ctx, 0, "3DSTATE_CC_STATE_POINTERS\n");
+ instr_out(ctx, 1, "blend change %d\n", ctx->data[1] & 1);
+ instr_out(ctx, 2, "depth stencil change %d\n",
+ ctx->data[2] & 1);
+ instr_out(ctx, 3, "cc change %d\n", ctx->data[3] & 1);
+
+ return 4;
+}
+
+static int
+gen7_3DSTATE_CC_STATE_POINTERS(struct drm_intel_decode *ctx)
+{
+ instr_out(ctx, 0, "3DSTATE_CC_STATE_POINTERS\n");
+ instr_out(ctx, 1, "pointer to COLOR_CALC_STATE at 0x%08x "
+ "(%s)\n",
+ ctx->data[1] & ~1,
+ (ctx->data[1] & 1) ? "changed" : "unchanged");
+
+ return 2;
+}
+
+static int
+gen7_3DSTATE_URB_unit(struct drm_intel_decode *ctx, const char *unit)
+{
+ int start_kb = ((ctx->data[1] >> 25) & 0x3f) * 8;
+	/* the size field is the number of 512-bit (64-byte) rows minus 1;
+	 * the total below is printed in bytes */
+ int entry_size = (((ctx->data[1] >> 16) & 0x1ff) + 1);
+ int nr_entries = ctx->data[1] & 0xffff;
+
+ instr_out(ctx, 0, "3DSTATE_URB_%s\n", unit);
+ instr_out(ctx, 1,
+ "%dKB start, size=%d 64B rows, nr_entries=%d, total size %dB\n",
+ start_kb, entry_size, nr_entries, nr_entries * 64 * entry_size);
+
+ return 2;
+}
+
+static int
+gen7_3DSTATE_URB_VS(struct drm_intel_decode *ctx)
+{
+ return gen7_3DSTATE_URB_unit(ctx, "VS");
+}
+
+static int
+gen7_3DSTATE_URB_HS(struct drm_intel_decode *ctx)
+{
+ return gen7_3DSTATE_URB_unit(ctx, "HS");
+}
+
+static int
+gen7_3DSTATE_URB_DS(struct drm_intel_decode *ctx)
+{
+ return gen7_3DSTATE_URB_unit(ctx, "DS");
+}
+
+static int
+gen7_3DSTATE_URB_GS(struct drm_intel_decode *ctx)
+{
+ return gen7_3DSTATE_URB_unit(ctx, "GS");
+}
+
+static int
+gen7_3DSTATE_CONSTANT(struct drm_intel_decode *ctx, const char *unit)
+{
+ int rlen[4];
+
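+	/* Four 16-bit read-length fields, one per constant buffer slot. */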
+ rlen[0] = (ctx->data[1] >> 0) & 0xffff;
+ rlen[1] = (ctx->data[1] >> 16) & 0xffff;
+ rlen[2] = (ctx->data[2] >> 0) & 0xffff;
+ rlen[3] = (ctx->data[2] >> 16) & 0xffff;
+
+ instr_out(ctx, 0, "3DSTATE_CONSTANT_%s\n", unit);
+ instr_out(ctx, 1, "len 0 = %d, len 1 = %d\n", rlen[0], rlen[1]);
+ instr_out(ctx, 2, "len 2 = %d, len 3 = %d\n", rlen[2], rlen[3]);
+ instr_out(ctx, 3, "pointer to constbuf 0\n");
+ instr_out(ctx, 4, "pointer to constbuf 1\n");
+ instr_out(ctx, 5, "pointer to constbuf 2\n");
+ instr_out(ctx, 6, "pointer to constbuf 3\n");
+
+ return 7;
+}
+
+static int
+gen7_3DSTATE_CONSTANT_VS(struct drm_intel_decode *ctx)
+{
+ return gen7_3DSTATE_CONSTANT(ctx, "VS");
+}
+
+static int
+gen7_3DSTATE_CONSTANT_GS(struct drm_intel_decode *ctx)
+{
+ return gen7_3DSTATE_CONSTANT(ctx, "GS");
+}
+
+static int
+gen7_3DSTATE_CONSTANT_PS(struct drm_intel_decode *ctx)
+{
+ return gen7_3DSTATE_CONSTANT(ctx, "PS");
+}
+
+static int
+gen7_3DSTATE_CONSTANT_DS(struct drm_intel_decode *ctx)
+{
+ return gen7_3DSTATE_CONSTANT(ctx, "DS");
+}
+
+static int
+gen7_3DSTATE_CONSTANT_HS(struct drm_intel_decode *ctx)
+{
+ return gen7_3DSTATE_CONSTANT(ctx, "HS");
+}
+
+
+static int
+gen6_3DSTATE_WM(struct drm_intel_decode *ctx)
+{
+ instr_out(ctx, 0, "3DSTATE_WM\n");
+ instr_out(ctx, 1, "kernel start pointer 0\n");
+ instr_out(ctx, 2,
+ "SPF=%d, VME=%d, Sampler Count %d, "
+ "Binding table count %d\n",
+ (ctx->data[2] >> 31) & 1,
+ (ctx->data[2] >> 30) & 1,
+ (ctx->data[2] >> 27) & 7,
+ (ctx->data[2] >> 18) & 0xff);
+ instr_out(ctx, 3, "scratch offset\n");
+ instr_out(ctx, 4,
+ "Depth Clear %d, Depth Resolve %d, HiZ Resolve %d, "
+ "Dispatch GRF start[0] %d, start[1] %d, start[2] %d\n",
+ (ctx->data[4] & (1 << 30)) != 0,
+ (ctx->data[4] & (1 << 28)) != 0,
+ (ctx->data[4] & (1 << 27)) != 0,
+ (ctx->data[4] >> 16) & 0x7f,
+ (ctx->data[4] >> 8) & 0x7f,
+ (ctx->data[4] & 0x7f));
+ instr_out(ctx, 5,
+ "MaxThreads %d, PS KillPixel %d, PS computed Z %d, "
+ "PS use sourceZ %d, Thread Dispatch %d, PS use sourceW %d, "
+ "Dispatch32 %d, Dispatch16 %d, Dispatch8 %d\n",
+ ((ctx->data[5] >> 25) & 0x7f) + 1,
+ (ctx->data[5] & (1 << 22)) != 0,
+ (ctx->data[5] & (1 << 21)) != 0,
+ (ctx->data[5] & (1 << 20)) != 0,
+ (ctx->data[5] & (1 << 19)) != 0,
+ (ctx->data[5] & (1 << 8)) != 0,
+ (ctx->data[5] & (1 << 2)) != 0,
+ (ctx->data[5] & (1 << 1)) != 0,
+ (ctx->data[5] & (1 << 0)) != 0);
+ instr_out(ctx, 6,
+		  "Num SF output %d, Pos XY offset %d, ZW interp mode %d, "
+ "Barycentric interp mode 0x%x, Point raster rule %d, "
+ "Multisample mode %d, "
+ "Multisample Dispatch mode %d\n",
+ (ctx->data[6] >> 20) & 0x3f,
+ (ctx->data[6] >> 18) & 3,
+ (ctx->data[6] >> 16) & 3,
+ (ctx->data[6] >> 10) & 0x3f,
+ (ctx->data[6] & (1 << 9)) != 0,
+ (ctx->data[6] >> 1) & 3,
+ (ctx->data[6] & 1));
+ instr_out(ctx, 7, "kernel start pointer 1\n");
+ instr_out(ctx, 8, "kernel start pointer 2\n");
+
+ return 9;
+}
+
+static int
+gen7_3DSTATE_WM(struct drm_intel_decode *ctx)
+{
+ const char *computed_depth = "";
+ const char *early_depth = "";
+ const char *zw_interp = "";
+
+ switch ((ctx->data[1] >> 23) & 0x3) {
+ case 0:
+ computed_depth = "";
+ break;
+ case 1:
+ computed_depth = "computed depth";
+ break;
+ case 2:
+ computed_depth = "computed depth >=";
+ break;
+ case 3:
+ computed_depth = "computed depth <=";
+ break;
+ }
+
+ switch ((ctx->data[1] >> 21) & 0x3) {
+ case 0:
+ early_depth = "";
+ break;
+ case 1:
+ early_depth = ", EDSC_PSEXEC";
+ break;
+ case 2:
+ early_depth = ", EDSC_PREPS";
+ break;
+ case 3:
+ early_depth = ", BAD EDSC";
+ break;
+ }
+
+	switch ((ctx->data[1] >> 17) & 0x3) {
+	case 0:
+		zw_interp = "";
+		break;
+	case 1:
+		zw_interp = ", BAD ZW interp";
+		break;
+	case 2:
+		zw_interp = ", ZW centroid";
+		break;
+	case 3:
+		zw_interp = ", ZW sample";
+		break;
+	}
+
+ instr_out(ctx, 0, "3DSTATE_WM\n");
+ instr_out(ctx, 1, "(%s%s%s%s%s%s)%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (ctx->data[1] & (1 << 11)) ? "PP " : "",
+ (ctx->data[1] & (1 << 12)) ? "PC " : "",
+ (ctx->data[1] & (1 << 13)) ? "PS " : "",
+ (ctx->data[1] & (1 << 14)) ? "NPP " : "",
+ (ctx->data[1] & (1 << 15)) ? "NPC " : "",
+ (ctx->data[1] & (1 << 16)) ? "NPS " : "",
+ (ctx->data[1] & (1 << 30)) ? ", depth clear" : "",
+ (ctx->data[1] & (1 << 29)) ? "" : ", disabled",
+ (ctx->data[1] & (1 << 28)) ? ", depth resolve" : "",
+ (ctx->data[1] & (1 << 27)) ? ", hiz resolve" : "",
+ (ctx->data[1] & (1 << 25)) ? ", kill" : "",
+ computed_depth,
+ early_depth,
+ zw_interp,
+ (ctx->data[1] & (1 << 20)) ? ", source depth" : "",
+ (ctx->data[1] & (1 << 19)) ? ", source W" : "",
+ (ctx->data[1] & (1 << 10)) ? ", coverage" : "",
+ (ctx->data[1] & (1 << 4)) ? ", poly stipple" : "",
+ (ctx->data[1] & (1 << 3)) ? ", line stipple" : "",
+ (ctx->data[1] & (1 << 2)) ? ", point UL" : ", point UR"
+ );
+ instr_out(ctx, 2, "MS\n");
+
+ return 3;
+}
+
+static int
+gen4_3DPRIMITIVE(struct drm_intel_decode *ctx)
+{
+ instr_out(ctx, 0,
+ "3DPRIMITIVE: %s %s\n",
+ get_965_prim_type((ctx->data[0] >> 10) & 0x1f),
+ (ctx->data[0] & (1 << 15)) ? "random" : "sequential");
+ instr_out(ctx, 1, "vertex count\n");
+ instr_out(ctx, 2, "start vertex\n");
+ instr_out(ctx, 3, "instance count\n");
+ instr_out(ctx, 4, "start instance\n");
+ instr_out(ctx, 5, "index bias\n");
+
+ return 6;
+}
+
+static int
+gen7_3DPRIMITIVE(struct drm_intel_decode *ctx)
+{
+ bool indirect = !!(ctx->data[0] & (1 << 10));
+
+ instr_out(ctx, 0,
+ "3DPRIMITIVE: %s%s\n",
+ indirect ? " indirect" : "",
+ (ctx->data[0] & (1 << 8)) ? " predicated" : "");
+ instr_out(ctx, 1, "%s %s\n",
+ get_965_prim_type(ctx->data[1] & 0x3f),
+ (ctx->data[1] & (1 << 8)) ? "random" : "sequential");
+ instr_out(ctx, 2, indirect ? "ignored" : "vertex count\n");
+ instr_out(ctx, 3, indirect ? "ignored" : "start vertex\n");
+ instr_out(ctx, 4, indirect ? "ignored" : "instance count\n");
+ instr_out(ctx, 5, indirect ? "ignored" : "start instance\n");
+ instr_out(ctx, 6, indirect ? "ignored" : "index bias\n");
+
+ return 7;
+}
+
+static int
+decode_3d_965(struct drm_intel_decode *ctx)
+{
+ uint32_t opcode;
+ unsigned int len;
+ unsigned int i, j, sba_len;
+ const char *desc1 = NULL;
+ uint32_t *data = ctx->data;
+ uint32_t devid = ctx->devid;
+
+ struct {
+ uint32_t opcode;
+ uint32_t len_mask;
+		unsigned int min_len;
+		unsigned int max_len;
+ const char *name;
+ int gen;
+ int (*func)(struct drm_intel_decode *ctx);
+ } opcodes_3d[] = {
+ { 0x6000, 0x00ff, 3, 3, "URB_FENCE" },
+ { 0x6001, 0xffff, 2, 2, "CS_URB_STATE" },
+ { 0x6002, 0x00ff, 2, 2, "CONSTANT_BUFFER" },
+ { 0x6101, 0xffff, 6, 10, "STATE_BASE_ADDRESS" },
+ { 0x6102, 0xffff, 2, 2, "STATE_SIP" },
+ { 0x6104, 0xffff, 1, 1, "3DSTATE_PIPELINE_SELECT" },
+ { 0x680b, 0xffff, 1, 1, "3DSTATE_VF_STATISTICS" },
+ { 0x6904, 0xffff, 1, 1, "3DSTATE_PIPELINE_SELECT" },
+ { 0x7800, 0xffff, 7, 7, "3DSTATE_PIPELINED_POINTERS" },
+ { 0x7801, 0x00ff, 4, 6, "3DSTATE_BINDING_TABLE_POINTERS" },
+ { 0x7802, 0x00ff, 4, 4, "3DSTATE_SAMPLER_STATE_POINTERS" },
+ { 0x7805, 0x00ff, 7, 7, "3DSTATE_DEPTH_BUFFER", 7 },
+ { 0x7805, 0x00ff, 3, 3, "3DSTATE_URB" },
+ { 0x7804, 0x00ff, 3, 3, "3DSTATE_CLEAR_PARAMS" },
+ { 0x7806, 0x00ff, 3, 3, "3DSTATE_STENCIL_BUFFER" },
+ { 0x790f, 0x00ff, 3, 3, "3DSTATE_HIER_DEPTH_BUFFER", 6 },
+ { 0x7807, 0x00ff, 3, 3, "3DSTATE_HIER_DEPTH_BUFFER", 7, gen7_3DSTATE_HIER_DEPTH_BUFFER },
+ { 0x7808, 0x00ff, 5, 257, "3DSTATE_VERTEX_BUFFERS" },
+ { 0x7809, 0x00ff, 3, 256, "3DSTATE_VERTEX_ELEMENTS" },
+ { 0x780a, 0x00ff, 3, 3, "3DSTATE_INDEX_BUFFER" },
+ { 0x780b, 0xffff, 1, 1, "3DSTATE_VF_STATISTICS" },
+ { 0x780d, 0x00ff, 4, 4, "3DSTATE_VIEWPORT_STATE_POINTERS" },
+ { 0x780e, 0xffff, 4, 4, NULL, 6, gen6_3DSTATE_CC_STATE_POINTERS },
+ { 0x780e, 0x00ff, 2, 2, NULL, 7, gen7_3DSTATE_CC_STATE_POINTERS },
+ { 0x780f, 0x00ff, 2, 2, "3DSTATE_SCISSOR_POINTERS" },
+ { 0x7810, 0x00ff, 6, 6, "3DSTATE_VS" },
+ { 0x7811, 0x00ff, 7, 7, "3DSTATE_GS" },
+ { 0x7812, 0x00ff, 4, 4, "3DSTATE_CLIP" },
+ { 0x7813, 0x00ff, 20, 20, "3DSTATE_SF", 6 },
+ { 0x7813, 0x00ff, 7, 7, "3DSTATE_SF", 7 },
+ { 0x7814, 0x00ff, 3, 3, "3DSTATE_WM", 7, gen7_3DSTATE_WM },
+ { 0x7814, 0x00ff, 9, 9, "3DSTATE_WM", 6, gen6_3DSTATE_WM },
+ { 0x7815, 0x00ff, 5, 5, "3DSTATE_CONSTANT_VS_STATE", 6 },
+ { 0x7815, 0x00ff, 7, 7, "3DSTATE_CONSTANT_VS", 7, gen7_3DSTATE_CONSTANT_VS },
+ { 0x7816, 0x00ff, 5, 5, "3DSTATE_CONSTANT_GS_STATE", 6 },
+ { 0x7816, 0x00ff, 7, 7, "3DSTATE_CONSTANT_GS", 7, gen7_3DSTATE_CONSTANT_GS },
+ { 0x7817, 0x00ff, 5, 5, "3DSTATE_CONSTANT_PS_STATE", 6 },
+ { 0x7817, 0x00ff, 7, 7, "3DSTATE_CONSTANT_PS", 7, gen7_3DSTATE_CONSTANT_PS },
+ { 0x7818, 0xffff, 2, 2, "3DSTATE_SAMPLE_MASK" },
+ { 0x7819, 0x00ff, 7, 7, "3DSTATE_CONSTANT_HS", 7, gen7_3DSTATE_CONSTANT_HS },
+ { 0x781a, 0x00ff, 7, 7, "3DSTATE_CONSTANT_DS", 7, gen7_3DSTATE_CONSTANT_DS },
+ { 0x781b, 0x00ff, 7, 7, "3DSTATE_HS" },
+ { 0x781c, 0x00ff, 4, 4, "3DSTATE_TE" },
+ { 0x781d, 0x00ff, 6, 6, "3DSTATE_DS" },
+ { 0x781e, 0x00ff, 3, 3, "3DSTATE_STREAMOUT" },
+ { 0x781f, 0x00ff, 14, 14, "3DSTATE_SBE" },
+ { 0x7820, 0x00ff, 8, 8, "3DSTATE_PS" },
+ { 0x7821, 0x00ff, 2, 2, NULL, 7, gen7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP },
+ { 0x7823, 0x00ff, 2, 2, NULL, 7, gen7_3DSTATE_VIEWPORT_STATE_POINTERS_CC },
+ { 0x7824, 0x00ff, 2, 2, NULL, 7, gen7_3DSTATE_BLEND_STATE_POINTERS },
+ { 0x7825, 0x00ff, 2, 2, NULL, 7, gen7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS },
+ { 0x7826, 0x00ff, 2, 2, "3DSTATE_BINDING_TABLE_POINTERS_VS" },
+ { 0x7827, 0x00ff, 2, 2, "3DSTATE_BINDING_TABLE_POINTERS_HS" },
+ { 0x7828, 0x00ff, 2, 2, "3DSTATE_BINDING_TABLE_POINTERS_DS" },
+ { 0x7829, 0x00ff, 2, 2, "3DSTATE_BINDING_TABLE_POINTERS_GS" },
+ { 0x782a, 0x00ff, 2, 2, "3DSTATE_BINDING_TABLE_POINTERS_PS" },
+ { 0x782b, 0x00ff, 2, 2, "3DSTATE_SAMPLER_STATE_POINTERS_VS" },
+ { 0x782c, 0x00ff, 2, 2, "3DSTATE_SAMPLER_STATE_POINTERS_HS" },
+ { 0x782d, 0x00ff, 2, 2, "3DSTATE_SAMPLER_STATE_POINTERS_DS" },
+ { 0x782e, 0x00ff, 2, 2, "3DSTATE_SAMPLER_STATE_POINTERS_GS" },
+ { 0x782f, 0x00ff, 2, 2, "3DSTATE_SAMPLER_STATE_POINTERS_PS" },
+ { 0x7830, 0x00ff, 2, 2, NULL, 7, gen7_3DSTATE_URB_VS },
+ { 0x7831, 0x00ff, 2, 2, NULL, 7, gen7_3DSTATE_URB_HS },
+ { 0x7832, 0x00ff, 2, 2, NULL, 7, gen7_3DSTATE_URB_DS },
+ { 0x7833, 0x00ff, 2, 2, NULL, 7, gen7_3DSTATE_URB_GS },
+ { 0x7900, 0xffff, 4, 4, "3DSTATE_DRAWING_RECTANGLE" },
+ { 0x7901, 0xffff, 5, 5, "3DSTATE_CONSTANT_COLOR" },
+ { 0x7905, 0xffff, 5, 7, "3DSTATE_DEPTH_BUFFER" },
+ { 0x7906, 0xffff, 2, 2, "3DSTATE_POLY_STIPPLE_OFFSET" },
+ { 0x7907, 0xffff, 33, 33, "3DSTATE_POLY_STIPPLE_PATTERN" },
+ { 0x7908, 0xffff, 3, 3, "3DSTATE_LINE_STIPPLE" },
+ { 0x7909, 0xffff, 2, 2, "3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP" },
+ { 0x7909, 0xffff, 2, 2, "3DSTATE_CLEAR_PARAMS" },
+ { 0x790a, 0xffff, 3, 3, "3DSTATE_AA_LINE_PARAMETERS" },
+ { 0x790b, 0xffff, 4, 4, "3DSTATE_GS_SVB_INDEX" },
+ { 0x790d, 0xffff, 3, 3, "3DSTATE_MULTISAMPLE", 6 },
+ { 0x790d, 0xffff, 4, 4, "3DSTATE_MULTISAMPLE", 7 },
+ { 0x7910, 0x00ff, 2, 2, "3DSTATE_CLEAR_PARAMS" },
+ { 0x7912, 0x00ff, 2, 2, "3DSTATE_PUSH_CONSTANT_ALLOC_VS" },
+ { 0x7913, 0x00ff, 2, 2, "3DSTATE_PUSH_CONSTANT_ALLOC_HS" },
+ { 0x7914, 0x00ff, 2, 2, "3DSTATE_PUSH_CONSTANT_ALLOC_DS" },
+ { 0x7915, 0x00ff, 2, 2, "3DSTATE_PUSH_CONSTANT_ALLOC_GS" },
+ { 0x7916, 0x00ff, 2, 2, "3DSTATE_PUSH_CONSTANT_ALLOC_PS" },
+ { 0x7917, 0x00ff, 2, 2+128*2, "3DSTATE_SO_DECL_LIST" },
+ { 0x7918, 0x00ff, 4, 4, "3DSTATE_SO_BUFFER" },
+ { 0x7a00, 0x00ff, 4, 6, "PIPE_CONTROL" },
+ { 0x7b00, 0x00ff, 7, 7, NULL, 7, gen7_3DPRIMITIVE },
+ { 0x7b00, 0x00ff, 6, 6, NULL, 0, gen4_3DPRIMITIVE },
+ }, *opcode_3d = NULL;
+
+ opcode = (data[0] & 0xffff0000) >> 16;
+
+ for (i = 0; i < ARRAY_SIZE(opcodes_3d); i++) {
+ if (opcode != opcodes_3d[i].opcode)
+ continue;
+
+ /* If it's marked as not our gen, skip. */
+ if (opcodes_3d[i].gen && opcodes_3d[i].gen != ctx->gen)
+ continue;
+
+ opcode_3d = &opcodes_3d[i];
+ break;
+ }
+
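+	/* The DWord-count field in the header is biased: it holds the
+	 * packet's total length in DWords minus two, hence the "+ 2" below.
+	 */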
+ if (opcode_3d) {
+ if (opcode_3d->max_len == 1)
+ len = 1;
+ else
+ len = (data[0] & opcode_3d->len_mask) + 2;
+
+ if (len < opcode_3d->min_len ||
+ len > opcode_3d->max_len) {
+ fprintf(out, "Bad length %d in %s, expected %d-%d\n",
+ len, opcode_3d->name,
+ opcode_3d->min_len, opcode_3d->max_len);
+ }
+ } else {
+ len = (data[0] & 0x0000ffff) + 2;
+ }
+
+ switch (opcode) {
+ case 0x6000:
+ return i965_decode_urb_fence(ctx, len);
+ case 0x6001:
+ instr_out(ctx, 0, "CS_URB_STATE\n");
+ instr_out(ctx, 1,
+ "entry_size: %d [%d bytes], n_entries: %d\n",
+ (data[1] >> 4) & 0x1f,
+ (((data[1] >> 4) & 0x1f) + 1) * 64, data[1] & 0x7);
+ return len;
+ case 0x6002:
+ instr_out(ctx, 0, "CONSTANT_BUFFER: %s\n",
+ (data[0] >> 8) & 1 ? "valid" : "invalid");
+ instr_out(ctx, 1,
+ "offset: 0x%08x, length: %d bytes\n", data[1] & ~0x3f,
+ ((data[1] & 0x3f) + 1) * 64);
+ return len;
+ case 0x6101:
+ i = 0;
+ instr_out(ctx, 0, "STATE_BASE_ADDRESS\n");
+ i++;
+
+ if (IS_GEN6(devid) || IS_GEN7(devid))
+ sba_len = 10;
+ else if (IS_GEN5(devid))
+ sba_len = 8;
+ else
+ sba_len = 6;
+ if (len != sba_len)
+ fprintf(out, "Bad count in STATE_BASE_ADDRESS\n");
+
+ state_base_out(ctx, i++, "general");
+ state_base_out(ctx, i++, "surface");
+ if (IS_GEN6(devid) || IS_GEN7(devid))
+ state_base_out(ctx, i++, "dynamic");
+ state_base_out(ctx, i++, "indirect");
+ if (IS_GEN5(devid) || IS_GEN6(devid) || IS_GEN7(devid))
+ state_base_out(ctx, i++, "instruction");
+
+ state_max_out(ctx, i++, "general");
+ if (IS_GEN6(devid) || IS_GEN7(devid))
+ state_max_out(ctx, i++, "dynamic");
+ state_max_out(ctx, i++, "indirect");
+ if (IS_GEN5(devid) || IS_GEN6(devid) || IS_GEN7(devid))
+ state_max_out(ctx, i++, "instruction");
+
+ return len;
+ case 0x7800:
+ instr_out(ctx, 0, "3DSTATE_PIPELINED_POINTERS\n");
+ instr_out(ctx, 1, "VS state\n");
+ instr_out(ctx, 2, "GS state\n");
+ instr_out(ctx, 3, "Clip state\n");
+ instr_out(ctx, 4, "SF state\n");
+ instr_out(ctx, 5, "WM state\n");
+ instr_out(ctx, 6, "CC state\n");
+ return len;
+ case 0x7801:
+ if (len != 6 && len != 4)
+ fprintf(out,
+ "Bad count in 3DSTATE_BINDING_TABLE_POINTERS\n");
+ if (len == 6) {
+ instr_out(ctx, 0,
+ "3DSTATE_BINDING_TABLE_POINTERS\n");
+ instr_out(ctx, 1, "VS binding table\n");
+ instr_out(ctx, 2, "GS binding table\n");
+ instr_out(ctx, 3, "Clip binding table\n");
+ instr_out(ctx, 4, "SF binding table\n");
+ instr_out(ctx, 5, "WM binding table\n");
+ } else {
+ instr_out(ctx, 0,
+ "3DSTATE_BINDING_TABLE_POINTERS: VS mod %d, "
+ "GS mod %d, PS mod %d\n",
+ (data[0] & (1 << 8)) != 0,
+ (data[0] & (1 << 9)) != 0,
+ (data[0] & (1 << 12)) != 0);
+ instr_out(ctx, 1, "VS binding table\n");
+ instr_out(ctx, 2, "GS binding table\n");
+ instr_out(ctx, 3, "WM binding table\n");
+ }
+
+ return len;
+ case 0x7802:
+ instr_out(ctx, 0,
+ "3DSTATE_SAMPLER_STATE_POINTERS: VS mod %d, "
+ "GS mod %d, PS mod %d\n", (data[0] & (1 << 8)) != 0,
+ (data[0] & (1 << 9)) != 0,
+ (data[0] & (1 << 12)) != 0);
+ instr_out(ctx, 1, "VS sampler state\n");
+ instr_out(ctx, 2, "GS sampler state\n");
+ instr_out(ctx, 3, "WM sampler state\n");
+ return len;
+ case 0x7805:
+ /* Actually 3DSTATE_DEPTH_BUFFER on gen7. */
+ if (ctx->gen == 7)
+ break;
+
+ instr_out(ctx, 0, "3DSTATE_URB\n");
+ instr_out(ctx, 1,
+ "VS entries %d, alloc size %d (1024bit row)\n",
+ data[1] & 0xffff, ((data[1] >> 16) & 0x07f) + 1);
+ instr_out(ctx, 2,
+ "GS entries %d, alloc size %d (1024bit row)\n",
+ (data[2] >> 8) & 0x3ff, (data[2] & 7) + 1);
+ return len;
+
+ case 0x7808:
+ if ((len - 1) % 4 != 0)
+ fprintf(out, "Bad count in 3DSTATE_VERTEX_BUFFERS\n");
+ instr_out(ctx, 0, "3DSTATE_VERTEX_BUFFERS\n");
+
+ for (i = 1; i < len;) {
+ int idx, access;
+ if (IS_GEN6(devid)) {
+ idx = 26;
+ access = 20;
+ } else {
+ idx = 27;
+ access = 26;
+ }
+ instr_out(ctx, i,
+ "buffer %d: %s, pitch %db\n", data[i] >> idx,
+ data[i] & (1 << access) ? "random" :
+ "sequential", data[i] & 0x07ff);
+ i++;
+ instr_out(ctx, i++, "buffer address\n");
+ instr_out(ctx, i++, "max index\n");
+ instr_out(ctx, i++, "mbz\n");
+ }
+ return len;
+
+ case 0x7809:
+ if ((len + 1) % 2 != 0)
+ fprintf(out, "Bad count in 3DSTATE_VERTEX_ELEMENTS\n");
+ instr_out(ctx, 0, "3DSTATE_VERTEX_ELEMENTS\n");
+
+ for (i = 1; i < len;) {
+ instr_out(ctx, i,
+ "buffer %d: %svalid, type 0x%04x, "
+ "src offset 0x%04x bytes\n",
+ data[i] >> ((IS_GEN6(devid) || IS_GEN7(devid)) ? 26 : 27),
+ data[i] & (1 << ((IS_GEN6(devid) || IS_GEN7(devid)) ? 25 : 26)) ?
+ "" : "in", (data[i] >> 16) & 0x1ff,
+ data[i] & 0x07ff);
+ i++;
+ instr_out(ctx, i, "(%s, %s, %s, %s), "
+ "dst offset 0x%02x bytes\n",
+ get_965_element_component(data[i], 0),
+ get_965_element_component(data[i], 1),
+ get_965_element_component(data[i], 2),
+ get_965_element_component(data[i], 3),
+ (data[i] & 0xff) * 4);
+ i++;
+ }
+ return len;
+
+ case 0x780d:
+ instr_out(ctx, 0,
+ "3DSTATE_VIEWPORT_STATE_POINTERS\n");
+ instr_out(ctx, 1, "clip\n");
+ instr_out(ctx, 2, "sf\n");
+ instr_out(ctx, 3, "cc\n");
+ return len;
+
+ case 0x780a:
+ instr_out(ctx, 0, "3DSTATE_INDEX_BUFFER\n");
+ instr_out(ctx, 1, "beginning buffer address\n");
+ instr_out(ctx, 2, "ending buffer address\n");
+ return len;
+
+ case 0x780f:
+ instr_out(ctx, 0, "3DSTATE_SCISSOR_POINTERS\n");
+ instr_out(ctx, 1, "scissor rect offset\n");
+ return len;
+
+ case 0x7810:
+ instr_out(ctx, 0, "3DSTATE_VS\n");
+ instr_out(ctx, 1, "kernel pointer\n");
+ instr_out(ctx, 2,
+ "SPF=%d, VME=%d, Sampler Count %d, "
+ "Binding table count %d\n", (data[2] >> 31) & 1,
+ (data[2] >> 30) & 1, (data[2] >> 27) & 7,
+ (data[2] >> 18) & 0xff);
+ instr_out(ctx, 3, "scratch offset\n");
+ instr_out(ctx, 4,
+ "Dispatch GRF start %d, VUE read length %d, "
+ "VUE read offset %d\n", (data[4] >> 20) & 0x1f,
+ (data[4] >> 11) & 0x3f, (data[4] >> 4) & 0x3f);
+ instr_out(ctx, 5,
+ "Max Threads %d, Vertex Cache %sable, "
+ "VS func %sable\n", ((data[5] >> 25) & 0x7f) + 1,
+ (data[5] & (1 << 1)) != 0 ? "dis" : "en",
+ (data[5] & 1) != 0 ? "en" : "dis");
+ return len;
+
+ case 0x7811:
+ instr_out(ctx, 0, "3DSTATE_GS\n");
+ instr_out(ctx, 1, "kernel pointer\n");
+ instr_out(ctx, 2,
+ "SPF=%d, VME=%d, Sampler Count %d, "
+ "Binding table count %d\n", (data[2] >> 31) & 1,
+ (data[2] >> 30) & 1, (data[2] >> 27) & 7,
+ (data[2] >> 18) & 0xff);
+ instr_out(ctx, 3, "scratch offset\n");
+ instr_out(ctx, 4,
+ "Dispatch GRF start %d, VUE read length %d, "
+ "VUE read offset %d\n", (data[4] & 0xf),
+ (data[4] >> 11) & 0x3f, (data[4] >> 4) & 0x3f);
+ instr_out(ctx, 5,
+ "Max Threads %d, Rendering %sable\n",
+ ((data[5] >> 25) & 0x7f) + 1,
+ (data[5] & (1 << 8)) != 0 ? "en" : "dis");
+ instr_out(ctx, 6,
+			  "Reorder %sable, Discard Adjacency %sable, "
+ "GS %sable\n",
+ (data[6] & (1 << 30)) != 0 ? "en" : "dis",
+ (data[6] & (1 << 29)) != 0 ? "en" : "dis",
+ (data[6] & (1 << 15)) != 0 ? "en" : "dis");
+ return len;
+
+ case 0x7812:
+ instr_out(ctx, 0, "3DSTATE_CLIP\n");
+ instr_out(ctx, 1,
+ "UserClip distance cull test mask 0x%x\n",
+ data[1] & 0xff);
+ instr_out(ctx, 2,
+ "Clip %sable, API mode %s, Viewport XY test %sable, "
+ "Viewport Z test %sable, Guardband test %sable, Clip mode %d, "
+ "Perspective Divide %sable, Non-Perspective Barycentric %sable, "
+ "Tri Provoking %d, Line Provoking %d, Trifan Provoking %d\n",
+ (data[2] & (1 << 31)) != 0 ? "en" : "dis",
+ (data[2] & (1 << 30)) != 0 ? "D3D" : "OGL",
+ (data[2] & (1 << 28)) != 0 ? "en" : "dis",
+ (data[2] & (1 << 27)) != 0 ? "en" : "dis",
+ (data[2] & (1 << 26)) != 0 ? "en" : "dis",
+ (data[2] >> 13) & 7,
+ (data[2] & (1 << 9)) != 0 ? "dis" : "en",
+ (data[2] & (1 << 8)) != 0 ? "en" : "dis",
+ (data[2] >> 4) & 3, (data[2] >> 2) & 3,
+ (data[2] & 3));
+ instr_out(ctx, 3,
+ "Min PointWidth %d, Max PointWidth %d, "
+ "Force Zero RTAIndex %sable, Max VPIndex %d\n",
+ (data[3] >> 17) & 0x7ff, (data[3] >> 6) & 0x7ff,
+ (data[3] & (1 << 5)) != 0 ? "en" : "dis",
+ (data[3] & 0xf));
+ return len;
+
+ case 0x7813:
+ if (ctx->gen == 7)
+ break;
+
+ instr_out(ctx, 0, "3DSTATE_SF\n");
+ instr_out(ctx, 1,
+ "Attrib Out %d, Attrib Swizzle %sable, VUE read length %d, "
+ "VUE read offset %d\n", (data[1] >> 22) & 0x3f,
+ (data[1] & (1 << 21)) != 0 ? "en" : "dis",
+ (data[1] >> 11) & 0x1f, (data[1] >> 4) & 0x3f);
+ instr_out(ctx, 2,
+ "Legacy Global DepthBias %sable, FrontFace fill %d, BF fill %d, "
+ "VP transform %sable, FrontWinding_%s\n",
+ (data[2] & (1 << 11)) != 0 ? "en" : "dis",
+ (data[2] >> 5) & 3, (data[2] >> 3) & 3,
+ (data[2] & (1 << 1)) != 0 ? "en" : "dis",
+ (data[2] & 1) != 0 ? "CCW" : "CW");
+ instr_out(ctx, 3,
+			  "AA %sable, CullMode %d, Scissor %sable, Multisample mode %d\n",
+ (data[3] & (1 << 31)) != 0 ? "en" : "dis",
+ (data[3] >> 29) & 3,
+ (data[3] & (1 << 11)) != 0 ? "en" : "dis",
+ (data[3] >> 8) & 3);
+ instr_out(ctx, 4,
+ "Last Pixel %sable, SubPixel Precision %d, Use PixelWidth %d\n",
+ (data[4] & (1 << 31)) != 0 ? "en" : "dis",
+ (data[4] & (1 << 12)) != 0 ? 4 : 8,
+ (data[4] & (1 << 11)) != 0);
+ instr_out(ctx, 5,
+ "Global Depth Offset Constant %f\n",
+ *(float *)(&data[5]));
+ instr_out(ctx, 6, "Global Depth Offset Scale %f\n",
+ *(float *)(&data[6]));
+ instr_out(ctx, 7, "Global Depth Offset Clamp %f\n",
+ *(float *)(&data[7]));
+
+ for (i = 0, j = 0; i < 8; i++, j += 2)
+ instr_out(ctx, i + 8,
+ "Attrib %d (Override %s%s%s%s, Const Source %d, Swizzle Select %d, "
+ "Source %d); Attrib %d (Override %s%s%s%s, Const Source %d, Swizzle Select %d, Source %d)\n",
+ j + 1,
+ (data[8 + i] & (1 << 31)) != 0 ? "W" : "",
+ (data[8 + i] & (1 << 30)) != 0 ? "Z" : "",
+ (data[8 + i] & (1 << 29)) != 0 ? "Y" : "",
+ (data[8 + i] & (1 << 28)) != 0 ? "X" : "",
+ (data[8 + i] >> 25) & 3,
+ (data[8 + i] >> 22) & 3,
+ (data[8 + i] >> 16) & 0x1f, j,
+ (data[8 + i] & (1 << 15)) != 0 ? "W" : "",
+ (data[8 + i] & (1 << 14)) != 0 ? "Z" : "",
+ (data[8 + i] & (1 << 13)) != 0 ? "Y" : "",
+ (data[8 + i] & (1 << 12)) != 0 ? "X" : "",
+ (data[8 + i] >> 9) & 3,
+ (data[8 + i] >> 6) & 3, (data[8 + i] & 0x1f));
+ instr_out(ctx, 16,
+ "Point Sprite TexCoord Enable\n");
+ instr_out(ctx, 17, "Const Interp Enable\n");
+ instr_out(ctx, 18,
+ "Attrib 7-0 WrapShortest Enable\n");
+ instr_out(ctx, 19,
+ "Attrib 15-8 WrapShortest Enable\n");
+
+ return len;
+
+ case 0x7900:
+ instr_out(ctx, 0, "3DSTATE_DRAWING_RECTANGLE\n");
+ instr_out(ctx, 1, "top left: %d,%d\n",
+ data[1] & 0xffff, (data[1] >> 16) & 0xffff);
+ instr_out(ctx, 2, "bottom right: %d,%d\n",
+ data[2] & 0xffff, (data[2] >> 16) & 0xffff);
+ instr_out(ctx, 3, "origin: %d,%d\n",
+ (int)data[3] & 0xffff, ((int)data[3] >> 16) & 0xffff);
+
+ return len;
+
+ case 0x7905:
+ instr_out(ctx, 0, "3DSTATE_DEPTH_BUFFER\n");
+ if (IS_GEN5(devid) || IS_GEN6(devid))
+ instr_out(ctx, 1,
+ "%s, %s, pitch = %d bytes, %stiled, HiZ %d, Separate Stencil %d\n",
+ get_965_surfacetype(data[1] >> 29),
+ get_965_depthformat((data[1] >> 18) & 0x7),
+ (data[1] & 0x0001ffff) + 1,
+ data[1] & (1 << 27) ? "" : "not ",
+ (data[1] & (1 << 22)) != 0,
+ (data[1] & (1 << 21)) != 0);
+ else
+ instr_out(ctx, 1,
+ "%s, %s, pitch = %d bytes, %stiled\n",
+ get_965_surfacetype(data[1] >> 29),
+ get_965_depthformat((data[1] >> 18) & 0x7),
+ (data[1] & 0x0001ffff) + 1,
+ data[1] & (1 << 27) ? "" : "not ");
+ instr_out(ctx, 2, "depth offset\n");
+ instr_out(ctx, 3, "%dx%d\n",
+ ((data[3] & 0x0007ffc0) >> 6) + 1,
+ ((data[3] & 0xfff80000) >> 19) + 1);
+ instr_out(ctx, 4, "volume depth\n");
+ if (len >= 6)
+ instr_out(ctx, 5, "\n");
+ if (len >= 7) {
+ if (IS_GEN6(devid))
+ instr_out(ctx, 6, "\n");
+ else
+ instr_out(ctx, 6,
+ "render target view extent\n");
+ }
+
+ return len;
+
+ case 0x7a00:
+ if (IS_GEN6(devid) || IS_GEN7(devid)) {
+ if (len != 4 && len != 5)
+ fprintf(out, "Bad count in PIPE_CONTROL\n");
+
+ switch ((data[1] >> 14) & 0x3) {
+ case 0:
+ desc1 = "no write";
+ break;
+ case 1:
+ desc1 = "qword write";
+ break;
+ case 2:
+ desc1 = "PS_DEPTH_COUNT write";
+ break;
+ case 3:
+ desc1 = "TIMESTAMP write";
+ break;
+ }
+ instr_out(ctx, 0, "PIPE_CONTROL\n");
+ instr_out(ctx, 1,
+ "%s, %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ desc1,
+ data[1] & (1 << 20) ? "cs stall, " : "",
+ data[1] & (1 << 19) ?
+ "global snapshot count reset, " : "",
+ data[1] & (1 << 18) ? "tlb invalidate, " : "",
+ data[1] & (1 << 17) ? "gfdt flush, " : "",
+ data[1] & (1 << 17) ? "media state clear, " :
+ "",
+ data[1] & (1 << 13) ? "depth stall, " : "",
+ data[1] & (1 << 12) ?
+ "render target cache flush, " : "",
+ data[1] & (1 << 11) ?
+ "instruction cache invalidate, " : "",
+ data[1] & (1 << 10) ?
+ "texture cache invalidate, " : "",
+ data[1] & (1 << 9) ?
+ "indirect state invalidate, " : "",
+ data[1] & (1 << 8) ? "notify irq, " : "",
+ data[1] & (1 << 7) ? "PIPE_CONTROL flush, " :
+ "",
+ data[1] & (1 << 6) ? "protect mem app_id, " :
+ "", data[1] & (1 << 5) ? "DC flush, " : "",
+ data[1] & (1 << 4) ? "vf fetch invalidate, " :
+ "",
+ data[1] & (1 << 3) ?
+ "constant cache invalidate, " : "",
+ data[1] & (1 << 2) ?
+ "state cache invalidate, " : "",
+ data[1] & (1 << 1) ? "stall at scoreboard, " :
+ "",
+ data[1] & (1 << 0) ? "depth cache flush, " :
+ "");
+ if (len == 5) {
+ instr_out(ctx, 2,
+ "destination address\n");
+ instr_out(ctx, 3,
+ "immediate dword low\n");
+ instr_out(ctx, 4,
+ "immediate dword high\n");
+ } else {
+ for (i = 2; i < len; i++) {
+ instr_out(ctx, i, "\n");
+ }
+ }
+ return len;
+ } else {
+ if (len != 4)
+ fprintf(out, "Bad count in PIPE_CONTROL\n");
+
+ switch ((data[0] >> 14) & 0x3) {
+ case 0:
+ desc1 = "no write";
+ break;
+ case 1:
+ desc1 = "qword write";
+ break;
+ case 2:
+ desc1 = "PS_DEPTH_COUNT write";
+ break;
+ case 3:
+ desc1 = "TIMESTAMP write";
+ break;
+ }
+ instr_out(ctx, 0,
+ "PIPE_CONTROL: %s, %sdepth stall, %sRC write flush, "
+ "%sinst flush\n",
+ desc1,
+ data[0] & (1 << 13) ? "" : "no ",
+ data[0] & (1 << 12) ? "" : "no ",
+ data[0] & (1 << 11) ? "" : "no ");
+ instr_out(ctx, 1, "destination address\n");
+ instr_out(ctx, 2, "immediate dword low\n");
+ instr_out(ctx, 3, "immediate dword high\n");
+ return len;
+ }
+ }
+
+ if (opcode_3d) {
+ if (opcode_3d->func) {
+ return opcode_3d->func(ctx);
+ } else {
+ instr_out(ctx, 0, "%s\n", opcode_3d->name);
+
+ for (i = 1; i < len; i++) {
+ instr_out(ctx, i, "dword %d\n", i);
+ }
+ return len;
+ }
+ }
+
+ instr_out(ctx, 0, "3D UNKNOWN: 3d_965 opcode = 0x%x\n",
+ opcode);
+ return 1;
+}
+
+static int
+decode_3d_i830(struct drm_intel_decode *ctx)
+{
+ unsigned int idx;
+ uint32_t opcode;
+ uint32_t *data = ctx->data;
+
+ struct {
+ uint32_t opcode;
+ unsigned int min_len;
+ unsigned int max_len;
+ const char *name;
+ } opcodes_3d[] = {
+ { 0x02, 1, 1, "3DSTATE_MODES_3" },
+ { 0x03, 1, 1, "3DSTATE_ENABLES_1" },
+ { 0x04, 1, 1, "3DSTATE_ENABLES_2" },
+ { 0x05, 1, 1, "3DSTATE_VFT0" },
+ { 0x06, 1, 1, "3DSTATE_AA" },
+ { 0x07, 1, 1, "3DSTATE_RASTERIZATION_RULES" },
+ { 0x08, 1, 1, "3DSTATE_MODES_1" },
+ { 0x09, 1, 1, "3DSTATE_STENCIL_TEST" },
+ { 0x0a, 1, 1, "3DSTATE_VFT1" },
+ { 0x0b, 1, 1, "3DSTATE_INDPT_ALPHA_BLEND" },
+ { 0x0c, 1, 1, "3DSTATE_MODES_5" },
+ { 0x0d, 1, 1, "3DSTATE_MAP_BLEND_OP" },
+ { 0x0e, 1, 1, "3DSTATE_MAP_BLEND_ARG" },
+ { 0x0f, 1, 1, "3DSTATE_MODES_2" },
+ { 0x15, 1, 1, "3DSTATE_FOG_COLOR" },
+ { 0x16, 1, 1, "3DSTATE_MODES_4"},
+ }, *opcode_3d;
+
+ opcode = (data[0] & 0x1f000000) >> 24;
+
+ switch (opcode) {
+ case 0x1f:
+ return decode_3d_primitive(ctx);
+ case 0x1d:
+ return decode_3d_1d(ctx);
+ case 0x1c:
+ return decode_3d_1c(ctx);
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(opcodes_3d); idx++) {
+ opcode_3d = &opcodes_3d[idx];
+ if ((data[0] & 0x1f000000) >> 24 == opcode_3d->opcode) {
+ unsigned int len = 1, i;
+
+ instr_out(ctx, 0, "%s\n", opcode_3d->name);
+ if (opcode_3d->max_len > 1) {
+ len = (data[0] & 0xff) + 2;
+ if (len < opcode_3d->min_len ||
+ len > opcode_3d->max_len) {
+ fprintf(out, "Bad count in %s\n",
+ opcode_3d->name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ instr_out(ctx, i, "dword %d\n", i);
+ }
+ return len;
+ }
+ }
+
+ instr_out(ctx, 0, "3D UNKNOWN: 3d_i830 opcode = 0x%x\n",
+ opcode);
+ return 1;
+}
+
+struct drm_intel_decode *
+drm_intel_decode_context_alloc(uint32_t devid)
+{
+ struct drm_intel_decode *ctx;
+
+ ctx = calloc(1, sizeof(struct drm_intel_decode));
+ if (!ctx)
+ return NULL;
+
+ ctx->devid = devid;
+ ctx->out = stdout;
+
+ if (IS_GEN9(devid))
+ ctx->gen = 9;
+ else if (IS_GEN8(devid))
+ ctx->gen = 8;
+ else if (IS_GEN7(devid))
+ ctx->gen = 7;
+ else if (IS_GEN6(devid))
+ ctx->gen = 6;
+ else if (IS_GEN5(devid))
+ ctx->gen = 5;
+ else if (IS_GEN4(devid))
+ ctx->gen = 4;
+ else if (IS_9XX(devid))
+ ctx->gen = 3;
+ else {
+ assert(IS_GEN2(devid));
+ ctx->gen = 2;
+ }
+
+ return ctx;
+}
+
+void
+drm_intel_decode_context_free(struct drm_intel_decode *ctx)
+{
+ free(ctx);
+}
+
+void
+drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
+ int dump_past_end)
+{
+ ctx->dump_past_end = !!dump_past_end;
+}
+
+void
+drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
+ void *data, uint32_t hw_offset, int count)
+{
+ ctx->base_data = data;
+ ctx->base_hw_offset = hw_offset;
+ ctx->base_count = count;
+}
+
+void
+drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
+ uint32_t head, uint32_t tail)
+{
+ ctx->head = head;
+ ctx->tail = tail;
+}
+
+void
+drm_intel_decode_set_output_file(struct drm_intel_decode *ctx,
+ FILE *output)
+{
+ ctx->out = output;
+}
+
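+/*
+ * Typical use of the decode API (an illustrative sketch only; "devid",
+ * "batch", "batch_offset" and "batch_dwords" stand for caller-supplied
+ * values):
+ *
+ *	struct drm_intel_decode *ctx;
+ *
+ *	ctx = drm_intel_decode_context_alloc(devid);
+ *	if (ctx) {
+ *		drm_intel_decode_set_batch_pointer(ctx, batch,
+ *						   batch_offset,
+ *						   batch_dwords);
+ *		drm_intel_decode(ctx);
+ *		drm_intel_decode_context_free(ctx);
+ *	}
+ */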
+/**
+ * Decodes the batch buffer that was set on the context with
+ * drm_intel_decode_set_batch_pointer(), writing the output to the
+ * stream set via drm_intel_decode_set_output_file() (stdout by
+ * default).
+ */
+void
+drm_intel_decode(struct drm_intel_decode *ctx)
+{
+ int ret;
+ unsigned int index = 0;
+ uint32_t devid;
+	int size;
+	void *temp;
+
+	if (!ctx)
+		return;
+
+	size = ctx->base_count * 4;
+
+ /* Put a scratch page full of obviously undefined data after
+ * the batchbuffer. This lets us avoid a bunch of length
+ * checking in statically sized packets.
+ */
+	temp = malloc(size + 4096);
+	if (!temp)
+		return;
+ memcpy(temp, ctx->base_data, size);
+ memset((char *)temp + size, 0xd0, 4096);
+ ctx->data = temp;
+
+ ctx->hw_offset = ctx->base_hw_offset;
+ ctx->count = ctx->base_count;
+
+ devid = ctx->devid;
+ head_offset = ctx->head;
+ tail_offset = ctx->tail;
+ out = ctx->out;
+
+ saved_s2_set = 0;
+ saved_s4_set = 1;
+
+ while (ctx->count > 0) {
+ index = 0;
+
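+		/* Bits 31:29 of the header DWord select the command
+		 * class: 0x0 = MI, 0x2 = 2D/blit, 0x3 = 3D/render.
+		 */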
+ switch ((ctx->data[index] & 0xe0000000) >> 29) {
+ case 0x0:
+ ret = decode_mi(ctx);
+
+			/* If MI_BATCHBUFFER_END happened, then dump
+			 * the rest of the output in case we someday
+			 * want it for debugging, but don't decode it,
+			 * since that would just add confusion in the
+			 * common case.
+			 */
+ if (ret == -1) {
+ if (ctx->dump_past_end) {
+ index++;
+ } else {
+ for (index = index + 1; index < ctx->count;
+ index++) {
+ instr_out(ctx, index, "\n");
+ }
+ }
+ } else
+ index += ret;
+ break;
+ case 0x2:
+ index += decode_2d(ctx);
+ break;
+ case 0x3:
+ if (IS_9XX(devid) && !IS_GEN3(devid)) {
+ index +=
+ decode_3d_965(ctx);
+ } else if (IS_GEN3(devid)) {
+ index += decode_3d(ctx);
+ } else {
+ index +=
+ decode_3d_i830(ctx);
+ }
+ break;
+ default:
+ instr_out(ctx, index, "UNKNOWN\n");
+ index++;
+ break;
+ }
+ fflush(out);
+
+ if (ctx->count < index)
+ break;
+
+ ctx->count -= index;
+ ctx->data += index;
+ ctx->hw_offset += 4 * index;
+ }
+
+ free(temp);
+}
diff --git a/chromium/third_party/libdrm/src/intel/libdrm_intel.pc.in b/chromium/third_party/libdrm/src/intel/libdrm_intel.pc.in
new file mode 100644
index 00000000000..670e4fee3d8
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/libdrm_intel.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_intel
+Description: Userspace interface to intel kernel DRM services
+Version: @PACKAGE_VERSION@
+Requires: libdrm
+Libs: -L${libdir} -ldrm_intel
+Cflags: -I${includedir} -I${includedir}/libdrm
diff --git a/chromium/third_party/libdrm/src/intel/mm.c b/chromium/third_party/libdrm/src/intel/mm.c
new file mode 100644
index 00000000000..954e9dcb160
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/mm.c
@@ -0,0 +1,264 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "xf86drm.h"
+#include "libdrm_macros.h"
+#include "mm.h"
+
+drm_private void mmDumpMemInfo(const struct mem_block *heap)
+{
+ drmMsg("Memory heap %p:\n", (void *)heap);
+ if (heap == 0) {
+ drmMsg(" heap == 0\n");
+ } else {
+ const struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ drmMsg(" Offset:%08x, Size:%08x, %c%c\n", p->ofs,
+ p->size, p->free ? 'F' : '.',
+ p->reserved ? 'R' : '.');
+ }
+
+ drmMsg("\nFree list:\n");
+
+ for (p = heap->next_free; p != heap; p = p->next_free) {
+ drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n", p->ofs,
+ p->size, p->free ? 'F' : '.',
+ p->reserved ? 'R' : '.');
+ }
+
+ }
+ drmMsg("End of memory blocks\n");
+}
+
+drm_private struct mem_block *mmInit(int ofs, int size)
+{
+ struct mem_block *heap, *block;
+
+ if (size <= 0)
+ return NULL;
+
+ heap = (struct mem_block *)calloc(1, sizeof(struct mem_block));
+ if (!heap)
+ return NULL;
+
+ block = (struct mem_block *)calloc(1, sizeof(struct mem_block));
+ if (!block) {
+ free(heap);
+ return NULL;
+ }
+
+ heap->next = block;
+ heap->prev = block;
+ heap->next_free = block;
+ heap->prev_free = block;
+
+ block->heap = heap;
+ block->next = heap;
+ block->prev = heap;
+ block->next_free = heap;
+ block->prev_free = heap;
+
+ block->ofs = ofs;
+ block->size = size;
+ block->free = 1;
+
+ return heap;
+}
+
+static struct mem_block *SliceBlock(struct mem_block *p,
+ int startofs, int size,
+ int reserved, int alignment)
+{
+ struct mem_block *newblock;
+
+ /* break left [p, newblock, p->next], then p = newblock */
+ if (startofs > p->ofs) {
+ newblock =
+ (struct mem_block *)calloc(1, sizeof(struct mem_block));
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs;
+ newblock->size = p->size - (startofs - p->ofs);
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* break right, also [p, newblock, p->next] */
+ if (size < p->size) {
+ newblock =
+ (struct mem_block *)calloc(1, sizeof(struct mem_block));
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs + size;
+ newblock->size = p->size - size;
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size = size;
+ }
+
+ /* p = middle block */
+ p->free = 0;
+
+ /* Remove p from the free list:
+ */
+ p->next_free->prev_free = p->prev_free;
+ p->prev_free->next_free = p->next_free;
+
+ p->next_free = 0;
+ p->prev_free = 0;
+
+ p->reserved = reserved;
+ return p;
+}
+
+drm_private struct mem_block *mmAllocMem(struct mem_block *heap, int size,
+ int align2, int startSearch)
+{
+ struct mem_block *p;
+ const int mask = (1 << align2) - 1;
+ int startofs = 0;
+ int endofs;
+
+ if (!heap || align2 < 0 || size <= 0)
+ return NULL;
+
+ for (p = heap->next_free; p != heap; p = p->next_free) {
+ assert(p->free);
+
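+		/* Round the candidate offset up to the requested 2^align2
+		 * boundary before checking whether the block still fits.
+		 */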
+ startofs = (p->ofs + mask) & ~mask;
+ if (startofs < startSearch) {
+ startofs = startSearch;
+ }
+ endofs = startofs + size;
+ if (endofs <= (p->ofs + p->size))
+ break;
+ }
+
+ if (p == heap)
+ return NULL;
+
+ assert(p->free);
+ p = SliceBlock(p, startofs, size, 0, mask + 1);
+
+ return p;
+}
+
+static int Join2Blocks(struct mem_block *p)
+{
+ /* XXX there should be some assertions here */
+
+ /* NOTE: heap->free == 0 */
+
+ if (p->free && p->next->free) {
+ struct mem_block *q = p->next;
+
+ assert(p->ofs + p->size == q->ofs);
+ p->size += q->size;
+
+ p->next = q->next;
+ q->next->prev = p;
+
+ q->next_free->prev_free = q->prev_free;
+ q->prev_free->next_free = q->next_free;
+
+ free(q);
+ return 1;
+ }
+ return 0;
+}
+
+drm_private int mmFreeMem(struct mem_block *b)
+{
+ if (!b)
+ return 0;
+
+ if (b->free) {
+ drmMsg("block already free\n");
+ return -1;
+ }
+ if (b->reserved) {
+ drmMsg("block is reserved\n");
+ return -1;
+ }
+
+ b->free = 1;
+ b->next_free = b->heap->next_free;
+ b->prev_free = b->heap;
+ b->next_free->prev_free = b;
+ b->prev_free->next_free = b;
+
+ Join2Blocks(b);
+ if (b->prev != b->heap)
+ Join2Blocks(b->prev);
+
+ return 0;
+}
+
+drm_private void mmDestroy(struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap)
+ return;
+
+ for (p = heap->next; p != heap;) {
+ struct mem_block *next = p->next;
+ free(p);
+ p = next;
+ }
+
+ free(heap);
+}
diff --git a/chromium/third_party/libdrm/src/intel/mm.h b/chromium/third_party/libdrm/src/intel/mm.h
new file mode 100644
index 00000000000..8d83743fd1c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/mm.h
@@ -0,0 +1,84 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * Memory manager code. Primarily used by device drivers to manage texture
+ * heaps, etc.
+ */
+
+#ifndef MM_H
+#define MM_H
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "libdrm_macros.h"
+
+struct mem_block {
+ struct mem_block *next, *prev;
+ struct mem_block *next_free, *prev_free;
+ struct mem_block *heap;
+ int ofs, size;
+ unsigned int free:1;
+ unsigned int reserved:1;
+};
+
+/**
+ * input: total size in bytes
+ * return: a heap pointer if OK, NULL if error
+ */
+drm_private extern struct mem_block *mmInit(int ofs, int size);
+
+/**
+ * Allocate 'size' bytes with 2^align2-byte alignment, restricting the
+ * search to free memory after 'startSearch'. Depth and back buffers
+ * should be placed in different 4 MB banks to get better page hits
+ * where possible.
+ * input: size = size of block
+ *        align2 = 2^align2 bytes alignment
+ *        startSearch = linear offset from start of heap to begin search
+ * return: pointer to the allocated block, NULL on error
+ */
+drm_private extern struct mem_block *mmAllocMem(struct mem_block *heap,
+ int size, int align2,
+ int startSearch);
+
+/**
+ * Free block starts at offset
+ * input: pointer to a block
+ * return: 0 if OK, -1 if error
+ */
+drm_private extern int mmFreeMem(struct mem_block *b);
+
+/**
+ * Destroy the memory manager, freeing all blocks.
+ */
+drm_private extern void mmDestroy(struct mem_block *heap);
+
+/**
+ * For debugging purposes.
+ */
+drm_private extern void mmDumpMemInfo(const struct mem_block *heap);
+
+#endif
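
The align2 parameter above is a log2: mmAllocMem() builds
mask = (1 << align2) - 1 and rounds each candidate offset up with
(ofs + mask) & ~mask. A worked sketch of that rounding (align_up() is a
hypothetical helper):

    static int align_up(int ofs, int align2)
    {
            const int mask = (1 << align2) - 1;
            return (ofs + mask) & ~mask;
    }

    /* align_up(0x1234, 12) == 0x2000; align_up(0x2000, 12) == 0x2000 */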
diff --git a/chromium/third_party/libdrm/src/intel/test_decode.c b/chromium/third_party/libdrm/src/intel/test_decode.c
new file mode 100644
index 00000000000..b4eddcd1d70
--- /dev/null
+++ b/chromium/third_party/libdrm/src/intel/test_decode.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <err.h>
+
+#include "libdrm_macros.h"
+#include "intel_bufmgr.h"
+#include "intel_chipset.h"
+
+#define HW_OFFSET 0x12300000
+
+static void
+usage(void)
+{
+ fprintf(stderr, "usage:\n");
+ fprintf(stderr, " test_decode <batch>\n");
+ fprintf(stderr, " test_decode <batch> -dump\n");
+ exit(1);
+}
+
+static void
+read_file(const char *filename, void **ptr, size_t *size)
+{
+ int fd, ret;
+ struct stat st;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ errx(1, "couldn't open `%s'", filename);
+
+ ret = fstat(fd, &st);
+ if (ret)
+ errx(1, "couldn't stat `%s'", filename);
+
+ *size = st.st_size;
+ *ptr = drm_mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
+ if (*ptr == MAP_FAILED)
+ errx(1, "couldn't map `%s'", filename);
+
+ close(fd);
+}
+
+static void
+dump_batch(struct drm_intel_decode *ctx, const char *batch_filename)
+{
+ void *batch_ptr;
+ size_t batch_size;
+
+ read_file(batch_filename, &batch_ptr, &batch_size);
+
+ drm_intel_decode_set_batch_pointer(ctx, batch_ptr, HW_OFFSET,
+ batch_size / 4);
+ drm_intel_decode_set_output_file(ctx, stdout);
+
+ drm_intel_decode(ctx);
+}
+
+static void
+compare_batch(struct drm_intel_decode *ctx, const char *batch_filename)
+{
+ FILE *out = NULL;
+ void *ptr, *ref_ptr, *batch_ptr;
+#ifdef HAVE_OPEN_MEMSTREAM
+ size_t size;
+#endif
+ size_t ref_size, batch_size;
+ const char *ref_suffix = "-ref.txt";
+ char *ref_filename;
+
+ ref_filename = malloc(strlen(batch_filename) + strlen(ref_suffix) + 1);
+ sprintf(ref_filename, "%s%s", batch_filename, ref_suffix);
+
+ /* Read the batch and reference. */
+ read_file(batch_filename, &batch_ptr, &batch_size);
+ read_file(ref_filename, &ref_ptr, &ref_size);
+
+ /* Set up our decode output in memory, because I don't want to
+ * figure out how to output to a file in a safe and sane way
+ * inside of an automake project's test infrastructure.
+ */
+#ifdef HAVE_OPEN_MEMSTREAM
+ out = open_memstream((char **)&ptr, &size);
+#else
+ fprintf(stderr, "platform lacks open_memstream, skipping.\n");
+ exit(77);
+#endif
+
+ drm_intel_decode_set_batch_pointer(ctx, batch_ptr, HW_OFFSET,
+ batch_size / 4);
+ drm_intel_decode_set_output_file(ctx, out);
+
+	drm_intel_decode(ctx);
+
+	/* open_memstream contents are only defined after a flush or close */
+	fflush(out);
+
+	if (strcmp(ref_ptr, ptr) != 0) {
+ fprintf(stderr, "Decode mismatch with reference `%s'.\n",
+ ref_filename);
+ fprintf(stderr, "You can dump the new output using:\n");
+ fprintf(stderr, " test_decode \"%s\" -dump\n", batch_filename);
+ exit(1);
+ }
+
+ fclose(out);
+ free(ref_filename);
+ free(ptr);
+}
+
+static uint16_t
+infer_devid(const char *batch_filename)
+{
+ struct {
+ const char *name;
+ uint16_t devid;
+ } chipsets[] = {
+ { "830", 0x3577},
+ { "855", 0x3582},
+ { "945", 0x2772},
+ { "gen4", 0x2a02 },
+ { "gm45", 0x2a42 },
+ { "gen5", PCI_CHIP_ILD_G },
+ { "gen6", PCI_CHIP_SANDYBRIDGE_GT2 },
+ { "gen7", PCI_CHIP_IVYBRIDGE_GT2 },
+ { "gen8", 0x1616 },
+ { NULL, 0 },
+ };
+ int i;
+
+ for (i = 0; chipsets[i].name != NULL; i++) {
+ if (strstr(batch_filename, chipsets[i].name))
+ return chipsets[i].devid;
+ }
+
+ fprintf(stderr, "Couldn't guess chipset id from batch filename `%s'.\n",
+ batch_filename);
+	fprintf(stderr, "Must contain one of:\n");
+ for (i = 0; chipsets[i].name != NULL; i++) {
+ fprintf(stderr, " %s\n", chipsets[i].name);
+ }
+ exit(1);
+}
+
+int
+main(int argc, char **argv)
+{
+ uint16_t devid;
+ struct drm_intel_decode *ctx;
+
+ if (argc < 2)
+ usage();
+
+ devid = infer_devid(argv[1]);
+
+ ctx = drm_intel_decode_context_alloc(devid);
+
+ if (argc == 3) {
+ if (strcmp(argv[2], "-dump") == 0)
+ dump_batch(ctx, argv[1]);
+ else
+ usage();
+ } else {
+ compare_batch(ctx, argv[1]);
+ }
+
+ drm_intel_decode_context_free(ctx);
+
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/libdrm.pc.in b/chromium/third_party/libdrm/src/libdrm.pc.in
new file mode 100644
index 00000000000..b46e2a64d8c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libdrm.pc.in
@@ -0,0 +1,10 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm
+Description: Userspace interface to kernel DRM services
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -ldrm
+Cflags: -I${includedir} -I${includedir}/libdrm
diff --git a/chromium/third_party/libdrm/src/libdrm_lists.h b/chromium/third_party/libdrm/src/libdrm_lists.h
new file mode 100644
index 00000000000..8926d8d1a67
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libdrm_lists.h
@@ -0,0 +1,118 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+/*
+ * List macros heavily inspired by the Linux kernel
+ * list handling.
+ */
+
+#include <stddef.h>
+
+typedef struct _drmMMListHead
+{
+ struct _drmMMListHead *prev;
+ struct _drmMMListHead *next;
+} drmMMListHead;
+
+#define DRMINITLISTHEAD(__item) \
+	do { \
+ (__item)->prev = (__item); \
+ (__item)->next = (__item); \
+ } while (0)
+
+#define DRMLISTADD(__item, __list) \
+ do { \
+ (__item)->prev = (__list); \
+ (__item)->next = (__list)->next; \
+ (__list)->next->prev = (__item); \
+ (__list)->next = (__item); \
+ } while (0)
+
+#define DRMLISTADDTAIL(__item, __list) \
+ do { \
+ (__item)->next = (__list); \
+ (__item)->prev = (__list)->prev; \
+ (__list)->prev->next = (__item); \
+ (__list)->prev = (__item); \
+	} while (0)
+
+#define DRMLISTDEL(__item) \
+ do { \
+ (__item)->prev->next = (__item)->next; \
+ (__item)->next->prev = (__item)->prev; \
+	} while (0)
+
+#define DRMLISTDELINIT(__item) \
+ do { \
+ (__item)->prev->next = (__item)->next; \
+ (__item)->next->prev = (__item)->prev; \
+ (__item)->next = (__item); \
+ (__item)->prev = (__item); \
+	} while (0)
+
+#define DRMLISTENTRY(__type, __item, __field) \
+ ((__type *)(((char *) (__item)) - offsetof(__type, __field)))
+
+#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
+
+#define DRMLISTSINGLE(__list) \
+ (!DRMLISTEMPTY(__list) && ((__list)->next == (__list)->prev))
+
+#define DRMLISTFOREACH(__item, __list) \
+ for ((__item) = (__list)->next; \
+ (__item) != (__list); (__item) = (__item)->next)
+
+#define DRMLISTFOREACHSAFE(__item, __temp, __list) \
+ for ((__item) = (__list)->next, (__temp) = (__item)->next; \
+ (__item) != (__list); \
+ (__item) = (__temp), (__temp) = (__item)->next)
+
+#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list) \
+ for ((__item) = (__list)->prev, (__temp) = (__item)->prev; \
+ (__item) != (__list); \
+ (__item) = (__temp), (__temp) = (__item)->prev)
+
+#define DRMLISTFOREACHENTRY(__item, __list, __head) \
+ for ((__item) = DRMLISTENTRY(typeof(*__item), (__list)->next, __head); \
+ &(__item)->__head != (__list); \
+ (__item) = DRMLISTENTRY(typeof(*__item), \
+ (__item)->__head.next, __head))
+
+#define DRMLISTFOREACHENTRYSAFE(__item, __temp, __list, __head) \
+ for ((__item) = DRMLISTENTRY(typeof(*__item), (__list)->next, __head), \
+ (__temp) = DRMLISTENTRY(typeof(*__item), \
+ (__item)->__head.next, __head); \
+ &(__item)->__head != (__list); \
+ (__item) = (__temp), \
+ (__temp) = DRMLISTENTRY(typeof(*__item), \
+ (__temp)->__head.next, __head))
+
+#define DRMLISTJOIN(__list, __join) if (!DRMLISTEMPTY(__list)) { \
+ (__list)->next->prev = (__join); \
+ (__list)->prev->next = (__join)->next; \
+ (__join)->next->prev = (__list)->prev; \
+ (__join)->next = (__list)->next; \
+}
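
A minimal sketch of the container-of style these macros provide, with
hypothetical types: embed a drmMMListHead in your struct, link nodes onto a
list head, then walk the entries (DRMLISTFOREACHENTRY relies on the GCC
typeof extension, as above):

    #include <stdio.h>
    #include "libdrm_lists.h"

    struct item {
            int value;
            drmMMListHead link;     /* embedded list node */
    };

    static void list_demo(void)
    {
            drmMMListHead head;
            struct item a = { 1 }, b = { 2 };
            struct item *it;

            DRMINITLISTHEAD(&head);
            DRMLISTADDTAIL(&a.link, &head);
            DRMLISTADDTAIL(&b.link, &head);

            DRMLISTFOREACHENTRY(it, &head, link)
                    printf("%d\n", it->value);  /* prints 1, then 2 */
    }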
diff --git a/chromium/third_party/libdrm/src/libdrm_macros.h b/chromium/third_party/libdrm/src/libdrm_macros.h
new file mode 100644
index 00000000000..639d09047ef
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libdrm_macros.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef LIBDRM_LIBDRM_H
+#define LIBDRM_LIBDRM_H
+
+#if defined(HAVE_VISIBILITY)
+# define drm_private __attribute__((visibility("hidden")))
+#else
+# define drm_private
+#endif
+
+
+/**
+ * Static (compile-time) assertion.
+ * Basically, use COND to dimension an array. If COND is false/zero the
+ * array size will be -1 and we'll get a compilation error.
+ */
+#define STATIC_ASSERT(COND) \
+ do { \
+ (void) sizeof(char [1 - 2*!(COND)]); \
+ } while (0)
+
+
+#include <sys/mman.h>
+
+#if defined(ANDROID) && !defined(__LP64__)
+#include <errno.h> /* for EINVAL */
+
+extern void *__mmap2(void *, size_t, int, int, int, size_t);
+
+static inline void *drm_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, loff_t offset)
+{
+ /* offset must be aligned to 4096 (not necessarily the page size) */
+ if (offset & 4095) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+
+ return __mmap2(addr, length, prot, flags, fd, (size_t) (offset >> 12));
+}
+
+# define drm_munmap(addr, length) \
+ munmap(addr, length)
+
+
+#else
+
+/* assume large file support exists */
+# define drm_mmap(addr, length, prot, flags, fd, offset) \
+ mmap(addr, length, prot, flags, fd, offset)
+
+
+static inline int drm_munmap(void *addr, size_t length)
+{
+ /* Copied from configure code generated by AC_SYS_LARGEFILE */
+#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + \
+ (((off_t) 1 << 31) << 31))
+ STATIC_ASSERT(LARGE_OFF_T % 2147483629 == 721 &&
+ LARGE_OFF_T % 2147483647 == 1);
+#undef LARGE_OFF_T
+
+ return munmap(addr, length);
+}
+#endif
+
+#endif
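
On 32-bit Android the drm_mmap() wrapper above hands __mmap2 the offset in
4096-byte units, which is why unaligned offsets fail with EINVAL. A sketch
of the equivalent arithmetic (mmap2_units() is a hypothetical helper):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t mmap2_units(uint64_t byte_offset)
    {
            /* drm_mmap() rejects anything not 4096-aligned */
            assert((byte_offset & 4095) == 0);
            return (uint32_t)(byte_offset >> 12);
    }

    /* e.g. mmap2_units(0x12300000) == 0x12300 */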
diff --git a/chromium/third_party/libdrm/src/libkms/Android.mk b/chromium/third_party/libdrm/src/libkms/Android.mk
new file mode 100644
index 00000000000..5386c6f55ec
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/Android.mk
@@ -0,0 +1,57 @@
+DRM_GPU_DRIVERS := $(strip $(filter-out swrast, $(BOARD_GPU_DRIVERS)))
+
+intel_drivers := i915 i965 i915g ilo
+radeon_drivers := r300g r600g radeonsi
+rockchip_drivers := rockchip
+mediatek_drivers := mediatek
+nouveau_drivers := nouveau
+virgl_drivers := virgl
+vmwgfx_drivers := vmwgfx
+
+valid_drivers := \
+ $(intel_drivers) \
+ $(radeon_drivers) \
+ $(rockchip_drivers) \
+ $(mediatek_drivers) \
+ $(nouveau_drivers) \
+ $(virgl_drivers) \
+ $(vmwgfx_drivers)
+
+# warn about invalid drivers
+invalid_drivers := $(filter-out $(valid_drivers), $(DRM_GPU_DRIVERS))
+ifneq ($(invalid_drivers),)
+$(warning invalid GPU drivers: $(invalid_drivers))
+# tidy up
+DRM_GPU_DRIVERS := $(filter-out $(invalid_drivers), $(DRM_GPU_DRIVERS))
+endif
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+include $(LOCAL_PATH)/Makefile.sources
+
+LOCAL_SRC_FILES := $(patsubst %.h, , $(LIBKMS_FILES))
+
+ifneq ($(filter $(vmwgfx_drivers), $(DRM_GPU_DRIVERS)),)
+LOCAL_SRC_FILES += $(LIBKMS_VMWGFX_FILES)
+endif
+
+ifneq ($(filter $(intel_drivers), $(DRM_GPU_DRIVERS)),)
+LOCAL_SRC_FILES += $(LIBKMS_INTEL_FILES)
+endif
+
+ifneq ($(filter $(nouveau_drivers), $(DRM_GPU_DRIVERS)),)
+LOCAL_SRC_FILES += $(LIBKMS_NOUVEAU_FILES)
+endif
+
+ifneq ($(filter $(radeon_drivers), $(DRM_GPU_DRIVERS)),)
+LOCAL_SRC_FILES += $(LIBKMS_RADEON_FILES)
+endif
+
+LOCAL_MODULE := libkms
+LOCAL_MODULE_TAGS := optional
+LOCAL_SHARED_LIBRARIES := libdrm
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/chromium/third_party/libdrm/src/libkms/Makefile.am b/chromium/third_party/libdrm/src/libkms/Makefile.am
new file mode 100644
index 00000000000..7b14f958fc3
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/Makefile.am
@@ -0,0 +1,52 @@
+include Makefile.sources
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir)/include/drm \
+ -I$(top_srcdir)
+
+libkms_la_LTLIBRARIES = libkms.la
+libkms_ladir = $(libdir)
+libkms_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libkms_la_LIBADD = ../libdrm.la
+
+#if HAVE_LIBUDEV
+#libkms_la_LIBADD += $(LIBUDEV_LIBS)
+#endif
+
+libkms_la_SOURCES = $(LIBKMS_FILES)
+
+if HAVE_VMWGFX
+libkms_la_SOURCES += $(LIBKMS_VMWGFX_FILES)
+endif
+
+if HAVE_INTEL
+libkms_la_SOURCES += $(LIBKMS_INTEL_FILES)
+endif
+
+if HAVE_NOUVEAU
+libkms_la_SOURCES += $(LIBKMS_NOUVEAU_FILES)
+endif
+
+if HAVE_RADEON
+libkms_la_SOURCES += $(LIBKMS_RADEON_FILES)
+endif
+
+if HAVE_EXYNOS
+libkms_la_SOURCES += $(LIBKMS_EXYNOS_FILES)
+AM_CFLAGS += -I$(top_srcdir)/exynos
+endif
+
+if HAVE_MEDIATEK
+libkms_la_SOURCES += $(LIBKMS_MEDIATEK_FILES)
+AM_CFLAGS += -I$(top_srcdir)/mediatek
+endif
+
+libkmsincludedir = ${includedir}/libkms
+libkmsinclude_HEADERS = $(LIBKMS_H_FILES)
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libkms.pc
+
+TESTS = kms-symbol-check
+EXTRA_DIST = $(TESTS)
diff --git a/chromium/third_party/libdrm/src/libkms/Makefile.sources b/chromium/third_party/libdrm/src/libkms/Makefile.sources
new file mode 100644
index 00000000000..2c1a10a3182
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/Makefile.sources
@@ -0,0 +1,26 @@
+LIBKMS_FILES := \
+ internal.h \
+ linux.c \
+ dumb.c \
+ api.c
+
+LIBKMS_VMWGFX_FILES := \
+ vmwgfx.c
+
+LIBKMS_INTEL_FILES := \
+ intel.c
+
+LIBKMS_NOUVEAU_FILES := \
+ nouveau.c
+
+LIBKMS_RADEON_FILES := \
+ radeon.c
+
+LIBKMS_EXYNOS_FILES := \
+ exynos.c
+
+LIBKMS_MEDIATEK_FILES := \
+ mediatek.c
+
+LIBKMS_H_FILES := \
+ libkms.h
diff --git a/chromium/third_party/libdrm/src/libkms/api.c b/chromium/third_party/libdrm/src/libkms/api.c
new file mode 100644
index 00000000000..354d8a2ebe0
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/api.c
@@ -0,0 +1,143 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "libdrm_macros.h"
+#include "internal.h"
+
+int kms_create(int fd, struct kms_driver **out)
+{
+ return linux_create(fd, out);
+}
+
+int kms_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
+{
+ switch (key) {
+ case KMS_BO_TYPE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return kms->get_prop(kms, key, out);
+}
+
+int kms_destroy(struct kms_driver **kms)
+{
+ if (!(*kms))
+ return 0;
+
+ free(*kms);
+ *kms = NULL;
+ return 0;
+}
+
+int kms_bo_create(struct kms_driver *kms, const unsigned *attr, struct kms_bo **out)
+{
+ unsigned width = 0;
+ unsigned height = 0;
+ enum kms_bo_type type = KMS_BO_TYPE_SCANOUT_X8R8G8B8;
+ int i;
+
+ for (i = 0; attr[i];) {
+ unsigned key = attr[i++];
+ unsigned value = attr[i++];
+
+ switch (key) {
+ case KMS_WIDTH:
+ width = value;
+ break;
+ case KMS_HEIGHT:
+ height = value;
+ break;
+ case KMS_BO_TYPE:
+ type = value;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (width == 0 || height == 0)
+ return -EINVAL;
+
+ /* XXX sanity check type */
+
+ if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8 &&
+ (width != 64 || height != 64))
+ return -EINVAL;
+
+ return kms->bo_create(kms, width, height, type, attr, out);
+}
+
+int kms_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
+{
+ switch (key) {
+ case KMS_PITCH:
+ *out = bo->pitch;
+ break;
+ case KMS_HANDLE:
+ *out = bo->handle;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int kms_bo_map(struct kms_bo *bo, void **out)
+{
+ return bo->kms->bo_map(bo, out);
+}
+
+int kms_bo_unmap(struct kms_bo *bo)
+{
+ return bo->kms->bo_unmap(bo);
+}
+
+int kms_bo_destroy(struct kms_bo **bo)
+{
+ int ret;
+
+ if (!(*bo))
+ return 0;
+
+ ret = (*bo)->kms->bo_destroy(*bo);
+ if (ret)
+ return ret;
+
+ *bo = NULL;
+ return 0;
+}
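
The attribute lists kms_bo_create() parses above are alternating key/value
pairs terminated by KMS_TERMINATE_PROP_LIST (which is 0). A minimal sketch,
assuming the libkms.h definitions later in this change (create_scanout() is
hypothetical):

    #include "libkms.h"

    static int create_scanout(struct kms_driver *kms, struct kms_bo **bo)
    {
            unsigned attr[] = {
                    KMS_WIDTH, 1024,
                    KMS_HEIGHT, 768,
                    KMS_BO_TYPE, KMS_BO_TYPE_SCANOUT_X8R8G8B8,
                    KMS_TERMINATE_PROP_LIST
            };

            return kms_bo_create(kms, attr, bo);
    }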
diff --git a/chromium/third_party/libdrm/src/libkms/dumb.c b/chromium/third_party/libdrm/src/libkms/dumb.c
new file mode 100644
index 00000000000..b95a072c13d
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/dumb.c
@@ -0,0 +1,220 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "internal.h"
+
+#include <sys/ioctl.h>
+#include "xf86drm.h"
+#include "libdrm_macros.h"
+
+struct dumb_bo
+{
+ struct kms_bo base;
+ unsigned map_count;
+};
+
+static int
+dumb_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
+{
+ switch (key) {
+ case KMS_BO_TYPE:
+ *out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+dumb_destroy(struct kms_driver *kms)
+{
+ free(kms);
+ return 0;
+}
+
+static int
+dumb_bo_create(struct kms_driver *kms,
+ const unsigned width, const unsigned height,
+ const enum kms_bo_type type, const unsigned *attr,
+ struct kms_bo **out)
+{
+ struct drm_mode_create_dumb arg;
+ struct dumb_bo *bo;
+ int i, ret;
+
+ for (i = 0; attr[i]; i += 2) {
+ switch (attr[i]) {
+ case KMS_WIDTH:
+ case KMS_HEIGHT:
+ break;
+ case KMS_BO_TYPE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo)
+ return -ENOMEM;
+
+ memset(&arg, 0, sizeof(arg));
+
+	/* All BO types are currently 32 bpp formats */
+ arg.bpp = 32;
+ arg.width = width;
+ arg.height = height;
+
+ ret = drmIoctl(kms->fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
+ if (ret)
+ goto err_free;
+
+ bo->base.kms = kms;
+ bo->base.handle = arg.handle;
+ bo->base.size = arg.size;
+ bo->base.pitch = arg.pitch;
+
+ *out = &bo->base;
+
+ return 0;
+
+err_free:
+ free(bo);
+ return ret;
+}
+
+static int
+dumb_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
+{
+ switch (key) {
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+dumb_bo_map(struct kms_bo *_bo, void **out)
+{
+ struct dumb_bo *bo = (struct dumb_bo *)_bo;
+ struct drm_mode_map_dumb arg;
+ void *map = NULL;
+ int ret;
+
+ if (bo->base.ptr) {
+ bo->map_count++;
+ *out = bo->base.ptr;
+ return 0;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
+ if (ret)
+ return ret;
+
+ map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, arg.offset);
+ if (map == MAP_FAILED)
+ return -errno;
+
+ bo->base.ptr = map;
+ bo->map_count++;
+ *out = bo->base.ptr;
+
+ return 0;
+}
+
+static int
+dumb_bo_unmap(struct kms_bo *_bo)
+{
+ struct dumb_bo *bo = (struct dumb_bo *)_bo;
+ bo->map_count--;
+ return 0;
+}
+
+static int
+dumb_bo_destroy(struct kms_bo *_bo)
+{
+ struct dumb_bo *bo = (struct dumb_bo *)_bo;
+ struct drm_mode_destroy_dumb arg;
+ int ret;
+
+ if (bo->base.ptr) {
+ /* XXX Sanity check map_count */
+ drm_munmap(bo->base.ptr, bo->base.size);
+ bo->base.ptr = NULL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &arg);
+ if (ret)
+ return -errno;
+
+ free(bo);
+ return 0;
+}
+
+drm_private int
+dumb_create(int fd, struct kms_driver **out)
+{
+ struct kms_driver *kms;
+ int ret;
+ uint64_t cap = 0;
+
+ ret = drmGetCap(fd, DRM_CAP_DUMB_BUFFER, &cap);
+ if (ret || cap == 0)
+ return -EINVAL;
+
+ kms = calloc(1, sizeof(*kms));
+ if (!kms)
+ return -ENOMEM;
+
+ kms->fd = fd;
+
+ kms->bo_create = dumb_bo_create;
+ kms->bo_map = dumb_bo_map;
+ kms->bo_unmap = dumb_bo_unmap;
+ kms->bo_get_prop = dumb_bo_get_prop;
+ kms->bo_destroy = dumb_bo_destroy;
+ kms->get_prop = dumb_get_prop;
+ kms->destroy = dumb_destroy;
+ *out = kms;
+
+ return 0;
+}
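
This backend wraps the generic dumb-buffer ioctls that any KMS driver
advertising DRM_CAP_DUMB_BUFFER supports. A condensed sketch of the same
create/map flow outside of libkms (dumb_demo() is hypothetical; error
unwinding trimmed):

    #include <string.h>
    #include <sys/mman.h>
    #include "xf86drm.h"

    static int dumb_demo(int fd, void **map_out)
    {
            struct drm_mode_create_dumb create;
            struct drm_mode_map_dumb map;
            void *ptr;

            memset(&create, 0, sizeof(create));
            create.width = 640;
            create.height = 480;
            create.bpp = 32;
            if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    return -1;

            memset(&map, 0, sizeof(map));
            map.handle = create.handle;
            if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                    return -1;

            ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, map.offset);
            if (ptr == MAP_FAILED)
                    return -1;

            *map_out = ptr;
            return 0;
    }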
diff --git a/chromium/third_party/libdrm/src/libkms/exynos.c b/chromium/third_party/libdrm/src/libkms/exynos.c
new file mode 100644
index 00000000000..5de2e5a95ba
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/exynos.c
@@ -0,0 +1,209 @@
+/* exynos.c
+ *
+ * Copyright 2009 Samsung Electronics Co., Ltd.
+ * Authors:
+ * SooChan Lim <sc1.lim@samsung.com>
+ * Sangjin LEE <lsj119@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "internal.h"
+
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include "xf86drm.h"
+
+#include "libdrm_macros.h"
+#include "exynos_drm.h"
+
+struct exynos_bo
+{
+ struct kms_bo base;
+ unsigned map_count;
+};
+
+static int
+exynos_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
+{
+ switch (key) {
+ case KMS_BO_TYPE:
+ *out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+exynos_destroy(struct kms_driver *kms)
+{
+ free(kms);
+ return 0;
+}
+
+static int
+exynos_bo_create(struct kms_driver *kms,
+ const unsigned width, const unsigned height,
+ const enum kms_bo_type type, const unsigned *attr,
+ struct kms_bo **out)
+{
+ struct drm_exynos_gem_create arg;
+ unsigned size, pitch;
+ struct exynos_bo *bo;
+ int i, ret;
+
+ for (i = 0; attr[i]; i += 2) {
+ switch (attr[i]) {
+ case KMS_WIDTH:
+ case KMS_HEIGHT:
+ case KMS_BO_TYPE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo)
+ return -ENOMEM;
+
+ if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8) {
+ pitch = 64 * 4;
+ size = 64 * 64 * 4;
+ } else if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8) {
+ pitch = width * 4;
+ pitch = (pitch + 512 - 1) & ~(512 - 1);
+ size = pitch * ((height + 4 - 1) & ~(4 - 1));
+	} else {
+		free(bo);
+		return -EINVAL;
+	}
+
+ memset(&arg, 0, sizeof(arg));
+ arg.size = size;
+
+ ret = drmCommandWriteRead(kms->fd, DRM_EXYNOS_GEM_CREATE, &arg, sizeof(arg));
+ if (ret)
+ goto err_free;
+
+ bo->base.kms = kms;
+ bo->base.handle = arg.handle;
+ bo->base.size = size;
+ bo->base.pitch = pitch;
+
+ *out = &bo->base;
+
+ return 0;
+
+err_free:
+ free(bo);
+ return ret;
+}
+
+static int
+exynos_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
+{
+ switch (key) {
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+exynos_bo_map(struct kms_bo *_bo, void **out)
+{
+ struct exynos_bo *bo = (struct exynos_bo *)_bo;
+ struct drm_mode_map_dumb arg;
+ void *map = NULL;
+ int ret;
+
+ if (bo->base.ptr) {
+ bo->map_count++;
+ *out = bo->base.ptr;
+ return 0;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
+ if (ret)
+ return ret;
+
+ map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, arg.offset);
+ if (map == MAP_FAILED)
+ return -errno;
+
+ bo->base.ptr = map;
+ bo->map_count++;
+ *out = bo->base.ptr;
+
+ return 0;
+}
+
+static int
+exynos_bo_unmap(struct kms_bo *_bo)
+{
+ struct exynos_bo *bo = (struct exynos_bo *)_bo;
+ bo->map_count--;
+ return 0;
+}
+
+static int
+exynos_bo_destroy(struct kms_bo *_bo)
+{
+ struct exynos_bo *bo = (struct exynos_bo *)_bo;
+ struct drm_gem_close arg;
+ int ret;
+
+ if (bo->base.ptr) {
+ /* XXX Sanity check map_count */
+		drm_munmap(bo->base.ptr, bo->base.size);
+ bo->base.ptr = NULL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
+ if (ret)
+ return -errno;
+
+ free(bo);
+ return 0;
+}
+
+drm_private int
+exynos_create(int fd, struct kms_driver **out)
+{
+ struct kms_driver *kms;
+
+ kms = calloc(1, sizeof(*kms));
+ if (!kms)
+ return -ENOMEM;
+
+ kms->fd = fd;
+
+ kms->bo_create = exynos_bo_create;
+ kms->bo_map = exynos_bo_map;
+ kms->bo_unmap = exynos_bo_unmap;
+ kms->bo_get_prop = exynos_bo_get_prop;
+ kms->bo_destroy = exynos_bo_destroy;
+ kms->get_prop = exynos_get_prop;
+ kms->destroy = exynos_destroy;
+ *out = kms;
+
+ return 0;
+}
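
The scanout size math used by this backend (and several of the ones below)
rounds the pitch up to a 512-byte multiple and the height up to a multiple
of 4 rows. A worked sketch (scanout_size() is a hypothetical helper): a
1366x768 X8R8G8B8 surface gets pitch 1366 * 4 = 5464 -> 5632 bytes and size
5632 * 768 = 4325376 bytes.

    static unsigned scanout_size(unsigned width, unsigned height,
                                 unsigned *pitch)
    {
            *pitch = (width * 4 + 511) & ~511u;     /* 512-byte aligned */
            return *pitch * ((height + 3) & ~3u);   /* 4-row padded */
    }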
diff --git a/chromium/third_party/libdrm/src/libkms/intel.c b/chromium/third_party/libdrm/src/libkms/intel.c
new file mode 100644
index 00000000000..3d8ca055f27
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/intel.c
@@ -0,0 +1,240 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "internal.h"
+
+#include <sys/ioctl.h>
+#include "xf86drm.h"
+#include "libdrm_macros.h"
+
+#include "i915_drm.h"
+
+struct intel_bo
+{
+ struct kms_bo base;
+ unsigned map_count;
+};
+
+static int
+intel_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
+{
+ switch (key) {
+ case KMS_BO_TYPE:
+ *out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+intel_destroy(struct kms_driver *kms)
+{
+ free(kms);
+ return 0;
+}
+
+static int
+intel_bo_create(struct kms_driver *kms,
+ const unsigned width, const unsigned height,
+ const enum kms_bo_type type, const unsigned *attr,
+ struct kms_bo **out)
+{
+ struct drm_i915_gem_create arg;
+ unsigned size, pitch;
+ struct intel_bo *bo;
+ int i, ret;
+
+ for (i = 0; attr[i]; i += 2) {
+ switch (attr[i]) {
+ case KMS_WIDTH:
+ case KMS_HEIGHT:
+ case KMS_BO_TYPE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo)
+ return -ENOMEM;
+
+ if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8) {
+ pitch = 64 * 4;
+ size = 64 * 64 * 4;
+ } else if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8) {
+ pitch = width * 4;
+ pitch = (pitch + 512 - 1) & ~(512 - 1);
+ size = pitch * ((height + 4 - 1) & ~(4 - 1));
+ } else {
+ free(bo);
+ return -EINVAL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.size = size;
+
+ ret = drmCommandWriteRead(kms->fd, DRM_I915_GEM_CREATE, &arg, sizeof(arg));
+ if (ret)
+ goto err_free;
+
+ bo->base.kms = kms;
+ bo->base.handle = arg.handle;
+ bo->base.size = size;
+ bo->base.pitch = pitch;
+
+ *out = &bo->base;
+ if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8 && pitch > 512) {
+ struct drm_i915_gem_set_tiling tile;
+
+ memset(&tile, 0, sizeof(tile));
+ tile.handle = bo->base.handle;
+ tile.tiling_mode = I915_TILING_X;
+ tile.stride = bo->base.pitch;
+
+ ret = drmCommandWriteRead(kms->fd, DRM_I915_GEM_SET_TILING, &tile, sizeof(tile));
+#if 0
+ if (ret) {
+ kms_bo_destroy(out);
+ return ret;
+ }
+#endif
+ }
+
+ return 0;
+
+err_free:
+ free(bo);
+ return ret;
+}
+
+static int
+intel_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
+{
+ switch (key) {
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+intel_bo_map(struct kms_bo *_bo, void **out)
+{
+ struct intel_bo *bo = (struct intel_bo *)_bo;
+ struct drm_i915_gem_mmap_gtt arg;
+ void *map = NULL;
+ int ret;
+
+ if (bo->base.ptr) {
+ bo->map_count++;
+ *out = bo->base.ptr;
+ return 0;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmCommandWriteRead(bo->base.kms->fd, DRM_I915_GEM_MMAP_GTT, &arg, sizeof(arg));
+ if (ret)
+ return ret;
+
+ map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, arg.offset);
+ if (map == MAP_FAILED)
+ return -errno;
+
+ bo->base.ptr = map;
+ bo->map_count++;
+ *out = bo->base.ptr;
+
+ return 0;
+}
+
+static int
+intel_bo_unmap(struct kms_bo *_bo)
+{
+ struct intel_bo *bo = (struct intel_bo *)_bo;
+ bo->map_count--;
+ return 0;
+}
+
+static int
+intel_bo_destroy(struct kms_bo *_bo)
+{
+ struct intel_bo *bo = (struct intel_bo *)_bo;
+ struct drm_gem_close arg;
+ int ret;
+
+ if (bo->base.ptr) {
+ /* XXX Sanity check map_count */
+ drm_munmap(bo->base.ptr, bo->base.size);
+ bo->base.ptr = NULL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
+ if (ret)
+ return -errno;
+
+ free(bo);
+ return 0;
+}
+
+drm_private int
+intel_create(int fd, struct kms_driver **out)
+{
+ struct kms_driver *kms;
+
+ kms = calloc(1, sizeof(*kms));
+ if (!kms)
+ return -ENOMEM;
+
+ kms->fd = fd;
+
+ kms->bo_create = intel_bo_create;
+ kms->bo_map = intel_bo_map;
+ kms->bo_unmap = intel_bo_unmap;
+ kms->bo_get_prop = intel_bo_get_prop;
+ kms->bo_destroy = intel_bo_destroy;
+ kms->get_prop = intel_get_prop;
+ kms->destroy = intel_destroy;
+ *out = kms;
+
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/libkms/internal.h b/chromium/third_party/libdrm/src/libkms/internal.h
new file mode 100644
index 00000000000..845c57d5925
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/internal.h
@@ -0,0 +1,86 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef INTERNAL_H_
+#define INTERNAL_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "libdrm_macros.h"
+#include "libkms.h"
+
+struct kms_driver
+{
+ int (*get_prop)(struct kms_driver *kms, const unsigned key,
+ unsigned *out);
+ int (*destroy)(struct kms_driver *kms);
+
+ int (*bo_create)(struct kms_driver *kms,
+ unsigned width,
+ unsigned height,
+ enum kms_bo_type type,
+ const unsigned *attr,
+ struct kms_bo **out);
+ int (*bo_get_prop)(struct kms_bo *bo, const unsigned key,
+ unsigned *out);
+ int (*bo_map)(struct kms_bo *bo, void **out);
+ int (*bo_unmap)(struct kms_bo *bo);
+ int (*bo_destroy)(struct kms_bo *bo);
+
+ int fd;
+};
+
+struct kms_bo
+{
+ struct kms_driver *kms;
+ void *ptr;
+ size_t size;
+ size_t offset;
+ size_t pitch;
+ unsigned handle;
+};
+
+drm_private int linux_create(int fd, struct kms_driver **out);
+
+drm_private int vmwgfx_create(int fd, struct kms_driver **out);
+
+drm_private int intel_create(int fd, struct kms_driver **out);
+
+drm_private int dumb_create(int fd, struct kms_driver **out);
+
+drm_private int nouveau_create(int fd, struct kms_driver **out);
+
+drm_private int radeon_create(int fd, struct kms_driver **out);
+
+drm_private int exynos_create(int fd, struct kms_driver **out);
+
+drm_private int mediatek_create(int fd, struct kms_driver **out);
+
+#endif
diff --git a/chromium/third_party/libdrm/src/libkms/kms-symbol-check b/chromium/third_party/libdrm/src/libkms/kms-symbol-check
new file mode 100755
index 00000000000..658b2692656
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/kms-symbol-check
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# The following symbols (past the first five) are taken from the public headers.
+# A list of the latter should be available in Makefile.sources/LIBKMS_H_FILES
+
+FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libkms.so} | awk '{print $3}'| while read func; do
+( grep -q "^$func$" || echo $func ) <<EOF
+__bss_start
+_edata
+_end
+_fini
+_init
+kms_bo_create
+kms_bo_destroy
+kms_bo_get_prop
+kms_bo_map
+kms_bo_unmap
+kms_create
+kms_destroy
+kms_get_prop
+EOF
+done)
+
+test ! -n "$FUNCS" || echo $FUNCS
+test ! -n "$FUNCS"
diff --git a/chromium/third_party/libdrm/src/libkms/libkms.h b/chromium/third_party/libdrm/src/libkms/libkms.h
new file mode 100644
index 00000000000..930a2bfc5ac
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/libkms.h
@@ -0,0 +1,82 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef _LIBKMS_H_
+#define _LIBKMS_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * \file
+ * Public interface of libkms.
+ */
+
+struct kms_driver;
+struct kms_bo;
+
+enum kms_attrib
+{
+ KMS_TERMINATE_PROP_LIST,
+#define KMS_TERMINATE_PROP_LIST KMS_TERMINATE_PROP_LIST
+ KMS_BO_TYPE,
+#define KMS_BO_TYPE KMS_BO_TYPE
+ KMS_WIDTH,
+#define KMS_WIDTH KMS_WIDTH
+ KMS_HEIGHT,
+#define KMS_HEIGHT KMS_HEIGHT
+ KMS_PITCH,
+#define KMS_PITCH KMS_PITCH
+ KMS_HANDLE,
+#define KMS_HANDLE KMS_HANDLE
+};
+
+enum kms_bo_type
+{
+ KMS_BO_TYPE_SCANOUT_X8R8G8B8 = (1 << 0),
+#define KMS_BO_TYPE_SCANOUT_X8R8G8B8 KMS_BO_TYPE_SCANOUT_X8R8G8B8
+ KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8 = (1 << 1),
+#define KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8 KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8
+};
+
+int kms_create(int fd, struct kms_driver **out);
+int kms_get_prop(struct kms_driver *kms, unsigned key, unsigned *out);
+int kms_destroy(struct kms_driver **kms);
+
+int kms_bo_create(struct kms_driver *kms, const unsigned *attr, struct kms_bo **out);
+int kms_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out);
+int kms_bo_map(struct kms_bo *bo, void **out);
+int kms_bo_unmap(struct kms_bo *bo);
+int kms_bo_destroy(struct kms_bo **bo);
+
+#if defined(__cplusplus)
+};
+#endif
+
+#endif
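
An end-to-end sketch of this public API, assuming fd is an open DRM device
file descriptor (draw_gray() is hypothetical; when KMS_BO_TYPE is omitted,
kms_bo_create() defaults to the scanout type):

    #include <string.h>
    #include "libkms.h"

    static int draw_gray(int fd)
    {
            struct kms_driver *kms;
            struct kms_bo *bo;
            unsigned attr[] = { KMS_WIDTH, 640, KMS_HEIGHT, 480,
                                KMS_TERMINATE_PROP_LIST };
            unsigned pitch;
            void *map;
            int ret;

            ret = kms_create(fd, &kms);
            if (ret)
                    return ret;

            ret = kms_bo_create(kms, attr, &bo);
            if (ret)
                    goto out_kms;

            kms_bo_get_prop(bo, KMS_PITCH, &pitch);

            ret = kms_bo_map(bo, &map);
            if (ret)
                    goto out_bo;

            memset(map, 0x80, pitch * 480); /* fill mid-gray */
            kms_bo_unmap(bo);

    out_bo:
            kms_bo_destroy(&bo);
    out_kms:
            kms_destroy(&kms);
            return ret;
    }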
diff --git a/chromium/third_party/libdrm/src/libkms/libkms.pc.in b/chromium/third_party/libdrm/src/libkms/libkms.pc.in
new file mode 100644
index 00000000000..1421b3eaae5
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/libkms.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libkms
+Description: Library that abstracts away the different memory management interfaces of kernel drivers
+Version: 1.0.0
+Libs: -L${libdir} -lkms
+Cflags: -I${includedir}/libkms
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/libkms/linux.c b/chromium/third_party/libdrm/src/libkms/linux.c
new file mode 100644
index 00000000000..85e06cdbd6c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/linux.c
@@ -0,0 +1,250 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Thanks to krh and jcristau for the tips on
+ * going from fd to pci id via fstat and udev.
+ */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <xf86drm.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#ifdef MAJOR_IN_MKDEV
+#include <sys/mkdev.h>
+#endif
+#ifdef MAJOR_IN_SYSMACROS
+#include <sys/sysmacros.h>
+#endif
+
+#include "libdrm_macros.h"
+#include "internal.h"
+
+#define PATH_SIZE 512
+
+static int
+linux_name_from_sysfs(int fd, char **out)
+{
+ char path[PATH_SIZE+1] = ""; /* initialize to please valgrind */
+ char link[PATH_SIZE+1] = "";
+ struct stat buffer;
+ unsigned maj, min;
+ char* slash_name;
+ int ret;
+
+	/*
+	 * Inside the sysfs directory for the device there is a symlink
+	 * to the directory representing the driver module; that path
+	 * happens to hold the name of the driver.
+	 *
+	 * So let's get the symlink for the drm device, then read the link
+	 * and take its last component, which happens to be the name of
+	 * the driver and which we can use to load the correct interface.
+	 *
+	 * Thanks to Ray Strode of Plymouth for the code.
+	 */
+
+ ret = fstat(fd, &buffer);
+ if (ret)
+ return -EINVAL;
+
+ if (!S_ISCHR(buffer.st_mode))
+ return -EINVAL;
+
+ maj = major(buffer.st_rdev);
+ min = minor(buffer.st_rdev);
+
+ snprintf(path, PATH_SIZE, "/sys/dev/char/%d:%d/device/driver", maj, min);
+
+ if (readlink(path, link, PATH_SIZE) < 0)
+ return -EINVAL;
+
+ /* link looks something like this: ../../../bus/pci/drivers/intel */
+ slash_name = strrchr(link, '/');
+ if (!slash_name)
+ return -EINVAL;
+
+ /* copy name and at the same time remove the slash */
+ *out = strdup(slash_name + 1);
+ return 0;
+}
+
+static int
+linux_from_sysfs(int fd, struct kms_driver **out)
+{
+ char *name;
+ int ret;
+
+ ret = linux_name_from_sysfs(fd, &name);
+ if (ret)
+ return ret;
+
+#ifdef HAVE_INTEL
+ if (!strcmp(name, "intel"))
+ ret = intel_create(fd, out);
+ else
+#endif
+#ifdef HAVE_VMWGFX
+ if (!strcmp(name, "vmwgfx"))
+ ret = vmwgfx_create(fd, out);
+ else
+#endif
+#ifdef HAVE_NOUVEAU
+ if (!strcmp(name, "nouveau"))
+ ret = nouveau_create(fd, out);
+ else
+#endif
+#ifdef HAVE_RADEON
+ if (!strcmp(name, "radeon"))
+ ret = radeon_create(fd, out);
+ else
+#endif
+#ifdef HAVE_EXYNOS
+ if (!strcmp(name, "exynos"))
+ ret = exynos_create(fd, out);
+ else
+#endif
+#ifdef HAVE_MEDIATEK
+ if (!strcmp(name, "mediatek"))
+ ret = mediatek_create(fd, out);
+ else
+#endif
+ ret = -ENOSYS;
+
+ free(name);
+ return ret;
+}
+
+#if 0
+#define LIBUDEV_I_KNOW_THE_API_IS_SUBJECT_TO_CHANGE
+#include <libudev.h>
+
+struct create_record
+{
+ unsigned vendor;
+ unsigned chip;
+ int (*func)(int fd, struct kms_driver **out);
+};
+
+static const struct create_record table[] = {
+ { 0x8086, 0x2a42, intel_create }, /* i965 */
+#ifdef HAVE_VMWGFX
+ { 0x15ad, 0x0405, vmwgfx_create }, /* VMware vGPU */
+#endif
+ { 0, 0, NULL },
+};
+
+static int
+linux_get_pciid_from_fd(int fd, unsigned *vendor_id, unsigned *chip_id)
+{
+ struct udev *udev;
+ struct udev_device *device;
+ struct udev_device *parent;
+ const char *pci_id;
+ struct stat buffer;
+ int ret;
+
+ ret = fstat(fd, &buffer);
+ if (ret)
+ return -EINVAL;
+
+ if (!S_ISCHR(buffer.st_mode))
+ return -EINVAL;
+
+ udev = udev_new();
+ if (!udev)
+ return -ENOMEM;
+
+ device = udev_device_new_from_devnum(udev, 'c', buffer.st_rdev);
+ if (!device)
+ goto err_free_udev;
+
+ parent = udev_device_get_parent(device);
+ if (!parent)
+ goto err_free_device;
+
+ pci_id = udev_device_get_property_value(parent, "PCI_ID");
+ if (!pci_id)
+ goto err_free_device;
+
+ if (sscanf(pci_id, "%x:%x", vendor_id, chip_id) != 2)
+ goto err_free_device;
+
+ udev_device_unref(device);
+ udev_unref(udev);
+
+ return 0;
+
+err_free_device:
+ udev_device_unref(device);
+err_free_udev:
+ udev_unref(udev);
+ return -EINVAL;
+}
+
+static int
+linux_from_udev(int fd, struct kms_driver **out)
+{
+ unsigned vendor_id, chip_id;
+ int ret, i;
+
+ ret = linux_get_pciid_from_fd(fd, &vendor_id, &chip_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; table[i].func; i++)
+ if (table[i].vendor == vendor_id && table[i].chip == chip_id)
+ return table[i].func(fd, out);
+
+ return -ENOSYS;
+}
+#else
+static int
+linux_from_udev(int fd, struct kms_driver **out)
+{
+ return -ENOSYS;
+}
+#endif
+
+drm_private int
+linux_create(int fd, struct kms_driver **out)
+{
+ if (!dumb_create(fd, out))
+ return 0;
+
+ if (!linux_from_udev(fd, out))
+ return 0;
+
+ return linux_from_sysfs(fd, out);
+}
diff --git a/chromium/third_party/libdrm/src/libkms/mediatek.c b/chromium/third_party/libdrm/src/libkms/mediatek.c
new file mode 100644
index 00000000000..b1a54c00311
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/mediatek.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: JB TSAI <jb.tsai@mediatek.com>
+ *
+ * based on rockchip_drm.c
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "internal.h"
+
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include "xf86drm.h"
+
+#include "mediatek_drm.h"
+
+struct mediatek_bo
+{
+ struct kms_bo base;
+ unsigned map_count;
+};
+
+static int
+mediatek_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
+{
+ switch (key) {
+ case KMS_BO_TYPE:
+ *out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mediatek_destroy(struct kms_driver *kms)
+{
+ free(kms);
+ return 0;
+}
+
+static int
+mediatek_bo_create(struct kms_driver *kms,
+ const unsigned width, const unsigned height,
+ const enum kms_bo_type type, const unsigned *attr,
+ struct kms_bo **out)
+{
+ struct drm_mtk_gem_create arg;
+ unsigned size, pitch;
+ struct mediatek_bo *bo;
+ int i, ret;
+
+ for (i = 0; attr[i]; i += 2) {
+ switch (attr[i]) {
+ case KMS_WIDTH:
+ case KMS_HEIGHT:
+ case KMS_BO_TYPE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo)
+ return -ENOMEM;
+
+ if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8) {
+ pitch = 64 * 4;
+ size = 64 * 64 * 4;
+ } else if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8) {
+ pitch = width * 4;
+ pitch = (pitch + 512 - 1) & ~(512 - 1);
+ size = pitch * ((height + 4 - 1) & ~(4 - 1));
+	} else {
+		free(bo);
+		return -EINVAL;
+	}
+
+ memset(&arg, 0, sizeof(arg));
+ arg.size = size;
+
+ ret = drmCommandWriteRead(kms->fd, DRM_MTK_GEM_CREATE, &arg, sizeof(arg));
+ if (ret)
+ goto err_free;
+
+ bo->base.kms = kms;
+ bo->base.handle = arg.handle;
+ bo->base.size = size;
+ bo->base.pitch = pitch;
+
+ *out = &bo->base;
+
+ return 0;
+
+err_free:
+ free(bo);
+ return ret;
+}
+
+static int
+mediatek_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
+{
+ switch (key) {
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+mediatek_bo_map(struct kms_bo *_bo, void **out)
+{
+ struct mediatek_bo *bo = (struct mediatek_bo *)_bo;
+ struct drm_mtk_gem_map_off arg;
+ void *map = NULL;
+ int ret;
+
+ if (bo->base.ptr) {
+ bo->map_count++;
+ *out = bo->base.ptr;
+ return 0;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmCommandWriteRead(bo->base.kms->fd, DRM_MTK_GEM_MAP_OFFSET, &arg, sizeof(arg));
+ if (ret)
+ return ret;
+
+	map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, arg.offset);
+ if (map == MAP_FAILED)
+ return -errno;
+
+ bo->base.ptr = map;
+ bo->map_count++;
+ *out = bo->base.ptr;
+
+ return 0;
+}
+
+static int
+mediatek_bo_unmap(struct kms_bo *_bo)
+{
+ struct mediatek_bo *bo = (struct mediatek_bo *)_bo;
+ bo->map_count--;
+ return 0;
+}
+
+static int
+mediatek_bo_destroy(struct kms_bo *_bo)
+{
+ struct mediatek_bo *bo = (struct mediatek_bo *)_bo;
+ struct drm_gem_close arg;
+ int ret;
+
+ if (bo->base.ptr) {
+ /* XXX Sanity check map_count */
+		drm_munmap(bo->base.ptr, bo->base.size);
+ bo->base.ptr = NULL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
+ if (ret)
+ return -errno;
+
+ free(bo);
+ return 0;
+}
+
+drm_private int
+mediatek_create(int fd, struct kms_driver **out)
+{
+ struct kms_driver *kms;
+
+ kms = calloc(1, sizeof(*kms));
+ if (!kms)
+ return -ENOMEM;
+
+ kms->fd = fd;
+
+ kms->bo_create = mediatek_bo_create;
+ kms->bo_map = mediatek_bo_map;
+ kms->bo_unmap = mediatek_bo_unmap;
+ kms->bo_get_prop = mediatek_bo_get_prop;
+ kms->bo_destroy = mediatek_bo_destroy;
+ kms->get_prop = mediatek_get_prop;
+ kms->destroy = mediatek_destroy;
+ *out = kms;
+
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/libkms/nouveau.c b/chromium/third_party/libdrm/src/libkms/nouveau.c
new file mode 100644
index 00000000000..d10e0fdb056
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/nouveau.c
@@ -0,0 +1,222 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "internal.h"
+
+#include <sys/ioctl.h>
+#include "xf86drm.h"
+#include "libdrm_macros.h"
+
+#include "nouveau_drm.h"
+
+struct nouveau_bo
+{
+ struct kms_bo base;
+ uint64_t map_handle;
+ unsigned map_count;
+};
+
+static int
+nouveau_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
+{
+ switch (key) {
+ case KMS_BO_TYPE:
+ *out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+nouveau_destroy(struct kms_driver *kms)
+{
+ free(kms);
+ return 0;
+}
+
+static int
+nouveau_bo_create(struct kms_driver *kms,
+ const unsigned width, const unsigned height,
+ const enum kms_bo_type type, const unsigned *attr,
+ struct kms_bo **out)
+{
+ struct drm_nouveau_gem_new arg;
+ unsigned size, pitch;
+ struct nouveau_bo *bo;
+ int i, ret;
+
+ for (i = 0; attr[i]; i += 2) {
+ switch (attr[i]) {
+ case KMS_WIDTH:
+ case KMS_HEIGHT:
+ case KMS_BO_TYPE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo)
+ return -ENOMEM;
+
+ if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8) {
+ pitch = 64 * 4;
+ size = 64 * 64 * 4;
+ } else if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8) {
+ pitch = width * 4;
+ pitch = (pitch + 512 - 1) & ~(512 - 1);
+ size = pitch * height;
+ } else {
+ free(bo);
+ return -EINVAL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.info.size = size;
+ arg.info.domain = NOUVEAU_GEM_DOMAIN_MAPPABLE | NOUVEAU_GEM_DOMAIN_VRAM;
+ arg.info.tile_mode = 0;
+ arg.info.tile_flags = 0;
+ arg.align = 512;
+ arg.channel_hint = 0;
+
+ ret = drmCommandWriteRead(kms->fd, DRM_NOUVEAU_GEM_NEW, &arg, sizeof(arg));
+ if (ret)
+ goto err_free;
+
+ bo->base.kms = kms;
+ bo->base.handle = arg.info.handle;
+ bo->base.size = size;
+ bo->base.pitch = pitch;
+ bo->map_handle = arg.info.map_handle;
+
+ *out = &bo->base;
+
+ return 0;
+
+err_free:
+ free(bo);
+ return ret;
+}
+
+static int
+nouveau_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
+{
+ switch (key) {
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+nouveau_bo_map(struct kms_bo *_bo, void **out)
+{
+ struct nouveau_bo *bo = (struct nouveau_bo *)_bo;
+ void *map = NULL;
+
+ if (bo->base.ptr) {
+ bo->map_count++;
+ *out = bo->base.ptr;
+ return 0;
+ }
+
+ map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, bo->map_handle);
+ if (map == MAP_FAILED)
+ return -errno;
+
+ bo->base.ptr = map;
+ bo->map_count++;
+ *out = bo->base.ptr;
+
+ return 0;
+}
+
+static int
+nouveau_bo_unmap(struct kms_bo *_bo)
+{
+ struct nouveau_bo *bo = (struct nouveau_bo *)_bo;
+ bo->map_count--;
+ return 0;
+}
+
+static int
+nouveau_bo_destroy(struct kms_bo *_bo)
+{
+ struct nouveau_bo *bo = (struct nouveau_bo *)_bo;
+ struct drm_gem_close arg;
+ int ret;
+
+ if (bo->base.ptr) {
+ /* XXX Sanity check map_count */
+ drm_munmap(bo->base.ptr, bo->base.size);
+ bo->base.ptr = NULL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
+ if (ret)
+ return -errno;
+
+ free(bo);
+ return 0;
+}
+
+drm_private int
+nouveau_create(int fd, struct kms_driver **out)
+{
+ struct kms_driver *kms;
+
+ kms = calloc(1, sizeof(*kms));
+ if (!kms)
+ return -ENOMEM;
+
+ kms->fd = fd;
+
+ kms->bo_create = nouveau_bo_create;
+ kms->bo_map = nouveau_bo_map;
+ kms->bo_unmap = nouveau_bo_unmap;
+ kms->bo_get_prop = nouveau_bo_get_prop;
+ kms->bo_destroy = nouveau_bo_destroy;
+ kms->get_prop = nouveau_get_prop;
+ kms->destroy = nouveau_destroy;
+ *out = kms;
+
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/libkms/radeon.c b/chromium/third_party/libdrm/src/libkms/radeon.c
new file mode 100644
index 00000000000..aaeeaf31dc2
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/radeon.c
@@ -0,0 +1,243 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "internal.h"
+
+#include <sys/ioctl.h>
+#include "xf86drm.h"
+#include "libdrm_macros.h"
+
+#include "radeon_drm.h"
+
+
+#define ALIGNMENT 512
+
+struct radeon_bo
+{
+ struct kms_bo base;
+ unsigned map_count;
+};
+
+static int
+radeon_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
+{
+ switch (key) {
+ case KMS_BO_TYPE:
+ *out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+radeon_destroy(struct kms_driver *kms)
+{
+ free(kms);
+ return 0;
+}
+
+static int
+radeon_bo_create(struct kms_driver *kms,
+ const unsigned width, const unsigned height,
+ const enum kms_bo_type type, const unsigned *attr,
+ struct kms_bo **out)
+{
+ struct drm_radeon_gem_create arg;
+ unsigned size, pitch;
+ struct radeon_bo *bo;
+ int i, ret;
+
+ for (i = 0; attr[i]; i += 2) {
+ switch (attr[i]) {
+ case KMS_WIDTH:
+ case KMS_HEIGHT:
+ case KMS_BO_TYPE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (type) {
+ case KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8:
+ pitch = 4 * 64;
+ size = 4 * 64 * 64;
+ break;
+ case KMS_BO_TYPE_SCANOUT_X8R8G8B8:
+ pitch = width * 4;
+ pitch = (pitch + ALIGNMENT - 1) & ~(ALIGNMENT - 1);
+ size = pitch * height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo)
+ return -ENOMEM;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.size = size;
+ arg.alignment = ALIGNMENT;
+ arg.initial_domain = RADEON_GEM_DOMAIN_CPU;
+ arg.flags = 0;
+ arg.handle = 0;
+
+ ret = drmCommandWriteRead(kms->fd, DRM_RADEON_GEM_CREATE,
+ &arg, sizeof(arg));
+ if (ret)
+ goto err_free;
+
+ bo->base.kms = kms;
+ bo->base.handle = arg.handle;
+ bo->base.size = size;
+ bo->base.pitch = pitch;
+ bo->base.offset = 0;
+ bo->map_count = 0;
+
+ *out = &bo->base;
+
+ return 0;
+
+err_free:
+ free(bo);
+ return ret;
+}
+
+static int
+radeon_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
+{
+ switch (key) {
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+radeon_bo_map(struct kms_bo *_bo, void **out)
+{
+ struct radeon_bo *bo = (struct radeon_bo *)_bo;
+ struct drm_radeon_gem_mmap arg;
+ void *map = NULL;
+ int ret;
+
+ if (bo->base.ptr) {
+ bo->map_count++;
+ *out = bo->base.ptr;
+ return 0;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+ arg.offset = bo->base.offset;
+ arg.size = (uint64_t)bo->base.size;
+
+ ret = drmCommandWriteRead(bo->base.kms->fd, DRM_RADEON_GEM_MMAP,
+ &arg, sizeof(arg));
+ if (ret)
+ return -errno;
+
+ map = drm_mmap(0, arg.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ bo->base.kms->fd, arg.addr_ptr);
+ if (map == MAP_FAILED)
+ return -errno;
+
+ bo->base.ptr = map;
+ bo->map_count++;
+ *out = bo->base.ptr;
+
+ return 0;
+}
+
+static int
+radeon_bo_unmap(struct kms_bo *_bo)
+{
+ struct radeon_bo *bo = (struct radeon_bo *)_bo;
+ if (--bo->map_count == 0) {
+ drm_munmap(bo->base.ptr, bo->base.size);
+ bo->base.ptr = NULL;
+ }
+ return 0;
+}
+
+static int
+radeon_bo_destroy(struct kms_bo *_bo)
+{
+ struct radeon_bo *bo = (struct radeon_bo *)_bo;
+ struct drm_gem_close arg;
+ int ret;
+
+ if (bo->base.ptr) {
+ /* XXX Sanity check map_count */
+ drm_munmap(bo->base.ptr, bo->base.size);
+ bo->base.ptr = NULL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+
+ ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
+ if (ret)
+ return -errno;
+
+ free(bo);
+ return 0;
+}
+
+drm_private int
+radeon_create(int fd, struct kms_driver **out)
+{
+ struct kms_driver *kms;
+
+ kms = calloc(1, sizeof(*kms));
+ if (!kms)
+ return -ENOMEM;
+
+ kms->fd = fd;
+
+ kms->bo_create = radeon_bo_create;
+ kms->bo_map = radeon_bo_map;
+ kms->bo_unmap = radeon_bo_unmap;
+ kms->bo_get_prop = radeon_bo_get_prop;
+ kms->bo_destroy = radeon_bo_destroy;
+ kms->get_prop = radeon_get_prop;
+ kms->destroy = radeon_destroy;
+ *out = kms;
+
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/libkms/vmwgfx.c b/chromium/third_party/libdrm/src/libkms/vmwgfx.c
new file mode 100644
index 00000000000..6a24fd4dffa
--- /dev/null
+++ b/chromium/third_party/libdrm/src/libkms/vmwgfx.c
@@ -0,0 +1,208 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include "internal.h"
+
+#include "xf86drm.h"
+#include "libdrm_macros.h"
+#include "vmwgfx_drm.h"
+
+struct vmwgfx_bo
+{
+ struct kms_bo base;
+ uint64_t map_handle;
+ unsigned map_count;
+};
+
+static int
+vmwgfx_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
+{
+ switch (key) {
+ case KMS_BO_TYPE:
+ *out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+vmwgfx_destroy(struct kms_driver *kms)
+{
+ free(kms);
+ return 0;
+}
+
+static int
+vmwgfx_bo_create(struct kms_driver *kms,
+ const unsigned width, const unsigned height,
+ const enum kms_bo_type type, const unsigned *attr,
+ struct kms_bo **out)
+{
+ struct vmwgfx_bo *bo;
+ int i, ret;
+
+ for (i = 0; attr[i]; i += 2) {
+ switch (attr[i]) {
+ case KMS_WIDTH:
+ case KMS_HEIGHT:
+ case KMS_BO_TYPE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo)
+ return -ENOMEM;
+
+ {
+ union drm_vmw_alloc_dmabuf_arg arg;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg.req;
+ struct drm_vmw_dmabuf_rep *rep = &arg.rep;
+
+ memset(&arg, 0, sizeof(arg));
+ req->size = width * height * 4;
+ bo->base.size = req->size;
+ bo->base.pitch = width * 4;
+ bo->base.kms = kms;
+
+ do {
+ ret = drmCommandWriteRead(bo->base.kms->fd,
+ DRM_VMW_ALLOC_DMABUF,
+ &arg, sizeof(arg));
+ } while (ret == -ERESTART);
+
+ if (ret)
+ goto err_free;
+
+ bo->map_handle = rep->map_handle;
+ /* the GMR id supersedes the plain buffer handle */
+ bo->base.handle = rep->cur_gmr_id;
+ bo->base.offset = rep->cur_gmr_offset;
+ }
+
+ *out = &bo->base;
+
+ return 0;
+
+err_free:
+ free(bo);
+ return ret;
+}
+
+static int
+vmwgfx_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
+{
+ switch (key) {
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+vmwgfx_bo_map(struct kms_bo *_bo, void **out)
+{
+ struct vmwgfx_bo *bo = (struct vmwgfx_bo *)_bo;
+ void *map;
+
+ if (bo->base.ptr) {
+ bo->map_count++;
+ *out = bo->base.ptr;
+ return 0;
+ }
+
+ map = drm_mmap(NULL, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, bo->map_handle);
+ if (map == MAP_FAILED)
+ return -errno;
+
+ bo->base.ptr = map;
+ bo->map_count++;
+ *out = bo->base.ptr;
+
+ return 0;
+}
+
+static int
+vmwgfx_bo_unmap(struct kms_bo *_bo)
+{
+ struct vmwgfx_bo *bo = (struct vmwgfx_bo *)_bo;
+ bo->map_count--;
+ return 0;
+}
+
+static int
+vmwgfx_bo_destroy(struct kms_bo *_bo)
+{
+ struct vmwgfx_bo *bo = (struct vmwgfx_bo *)_bo;
+ struct drm_vmw_unref_dmabuf_arg arg;
+
+ if (bo->base.ptr) {
+ /* XXX Sanity check map_count */
+ drm_munmap(bo->base.ptr, bo->base.size);
+ bo->base.ptr = NULL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo->base.handle;
+ drmCommandWrite(bo->base.kms->fd, DRM_VMW_UNREF_DMABUF, &arg, sizeof(arg));
+
+ free(bo);
+ return 0;
+}
+
+drm_private int
+vmwgfx_create(int fd, struct kms_driver **out)
+{
+ struct kms_driver *kms;
+
+ kms = calloc(1, sizeof(*kms));
+ if (!kms)
+ return -ENOMEM;
+
+ kms->fd = fd;
+
+ kms->bo_create = vmwgfx_bo_create;
+ kms->bo_map = vmwgfx_bo_map;
+ kms->bo_unmap = vmwgfx_bo_unmap;
+ kms->bo_get_prop = vmwgfx_bo_get_prop;
+ kms->bo_destroy = vmwgfx_bo_destroy;
+ kms->get_prop = vmwgfx_get_prop;
+ kms->destroy = vmwgfx_destroy;
+ *out = kms;
+ return 0;
+}
diff --git a/chromium/third_party/libdrm/src/man/Makefile.am b/chromium/third_party/libdrm/src/man/Makefile.am
new file mode 100644
index 00000000000..00eb423458c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/man/Makefile.am
@@ -0,0 +1,62 @@
+#
+# This generates man-pages out of the Docbook XML files. Simply add your files
+# to the relevant *man_PRE array. If aliases are created, please add them to the
+# *man_aliases_PRE array so they get installed correctly.
+#
+
+libman_PRE = \
+ drmAvailable.xml \
+ drmHandleEvent.xml \
+ drmModeGetResources.xml
+
+miscman_PRE = \
+ drm.xml \
+ drm-kms.xml \
+ drm-memory.xml
+
+miscman_aliases_PRE = \
+ drm-mm.xml \
+ drm-gem.xml \
+ drm-ttm.xml
+
+libmandir = $(LIB_MAN_DIR)
+miscmandir = $(MISC_MAN_DIR)
+miscman_aliasesdir = $(MISC_MAN_DIR)
+
+libman_DATA = $(libman_PRE:.xml=.$(LIB_MAN_SUFFIX))
+miscman_DATA = $(miscman_PRE:.xml=.$(MISC_MAN_SUFFIX))
+miscman_aliases_DATA = $(miscman_aliases_PRE:.xml=.$(MISC_MAN_SUFFIX))
+
+XML_FILES = \
+ $(libman_PRE) \
+ $(miscman_PRE)
+
+MAN_FILES = \
+ $(libman_DATA) \
+ $(miscman_DATA) \
+ $(miscman_aliases_DATA)
+
+EXTRA_DIST = $(XML_FILES)
+CLEANFILES = $(MAN_FILES)
+
+XSLTPROC_FLAGS = \
+ --stringparam man.authors.section.enabled 0 \
+ --stringparam man.copyright.section.enabled 0 \
+ --stringparam funcsynopsis.style ansi \
+ --stringparam man.output.quietly 1 \
+ --nonet \
+ $(MANPAGES_STYLESHEET)
+
+XSLTPROC_PROCESS_MAN = \
+ $(AM_V_GEN)$(XSLTPROC) -o "$@" $(XSLTPROC_FLAGS) "$<"
+
+$(miscman_aliases_DATA): $(miscman_DATA)
+ $(AM_V_GEN)if test -n "$@" ; then $(SED) -i -e 's/^\.so \([a-z_]\+\)\.\([0-9]\)$$/\.so man\2\/\1\.\2/' "$@" ; fi
+
+SUFFIXES = .$(LIB_MAN_SUFFIX) .$(MISC_MAN_SUFFIX) .xml
+
+.xml.$(LIB_MAN_SUFFIX):
+ $(XSLTPROC_PROCESS_MAN)
+
+.xml.$(MISC_MAN_SUFFIX):
+ $(XSLTPROC_PROCESS_MAN)
diff --git a/chromium/third_party/libdrm/src/man/drm-kms.xml b/chromium/third_party/libdrm/src/man/drm-kms.xml
new file mode 100644
index 00000000000..ae38dc8d208
--- /dev/null
+++ b/chromium/third_party/libdrm/src/man/drm-kms.xml
@@ -0,0 +1,342 @@
+<?xml version='1.0'?> <!--*-nxml-*-->
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+
+<!--
+ Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
+ Dedicated to the Public Domain
+-->
+
+<refentry id="drm-kms">
+ <refentryinfo>
+ <title>Direct Rendering Manager</title>
+ <productname>libdrm</productname>
+ <date>September 2012</date>
+ <authorgroup>
+ <author>
+ <contrib>Developer</contrib>
+ <firstname>David</firstname>
+ <surname>Herrmann</surname>
+ <email>dh.herrmann@googlemail.com</email>
+ </author>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>drm-kms</refentrytitle>
+ <manvolnum>7</manvolnum>
+ </refmeta>
+
+ <refnamediv>
+ <refname>drm-kms</refname>
+ <refpurpose>Kernel Mode-Setting</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
+ <funcsynopsisinfo>#include &lt;xf86drmMode.h&gt;</funcsynopsisinfo>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+    <para>Each DRM device provides access to manage which monitors and displays
+      are currently used and which frames are to be displayed. This task is
+      called <emphasis>Kernel Mode-Setting</emphasis> (KMS). Historically,
+      this was done in user-space and called
+      <emphasis>User-space Mode-Setting</emphasis> (UMS). Almost all
+      open-source drivers now provide the KMS kernel API to do this in the
+      kernel; however, many non-open-source binary drivers from different
+      vendors still do not support it. You can use
+      <citerefentry><refentrytitle>drmCheckModesettingSupported</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+      to check whether your driver supports this. To understand how KMS
+ works, we need to introduce 5 objects: <emphasis>CRTCs</emphasis>,
+ <emphasis>Planes</emphasis>, <emphasis>Encoders</emphasis>,
+ <emphasis>Connectors</emphasis> and
+ <emphasis>Framebuffers</emphasis>.
+
+ <variablelist>
+ <varlistentry>
+ <term>CRTCs</term>
+ <listitem>
+            <para>A <emphasis>CRTC</emphasis>, short for
+              <emphasis>CRT Controller</emphasis>, is an abstraction
+ representing a part of the chip that contains a pointer to a
+ scanout buffer. Therefore, the number of CRTCs available
+ determines how many independent scanout buffers can be active
+ at any given time. The CRTC structure contains several fields
+ to support this: a pointer to some video memory (abstracted as
+ a frame-buffer object), a list of driven connectors, a display
+ mode and an (x, y) offset into the video memory to support
+ panning or configurations where one piece of video memory
+ spans multiple CRTCs. A CRTC is the central point where
+ configuration of displays happens. You select which objects to
+ use, which modes and which parameters and then configure each
+ CRTC via
+            <citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ to drive the display devices.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Planes</term>
+ <listitem>
+            <para>A <emphasis>plane</emphasis> represents an image source that
+              can be blended with or overlaid on top of a CRTC during the
+              scanout process. Planes are associated with a frame-buffer to
+              crop a portion of the image memory (source) and optionally
+              scale it to a destination size. The result is then blended
+              with or overlaid on top of a CRTC. Planes are not provided by
+ all hardware and the number of available planes is limited. If
+ planes are not available or if not enough planes are
+ available, the user should fall back to normal software
+ blending (via GPU or CPU).</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Encoders</term>
+ <listitem>
+ <para>An <emphasis>encoder</emphasis> takes pixel data from a CRTC
+ and converts it to a format suitable for any attached
+ connectors. On some devices, it may be possible to have a CRTC
+ send data to more than one encoder. In that case, both
+ encoders would receive data from the same scanout buffer,
+ resulting in a <emphasis>cloned</emphasis> display
+ configuration across the connectors attached to each
+ encoder.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Connectors</term>
+ <listitem>
+ <para>A <emphasis>connector</emphasis> is the final destination of
+ pixel-data on a device, and usually connects directly to an
+ external display device like a monitor or laptop panel. A
+ connector can only be attached to one encoder at a time. The
+ connector is also the structure where information about the
+ attached display is kept, so it contains fields for display
+ data, <emphasis>EDID</emphasis> data,
+ <emphasis>DPMS</emphasis> and
+ <emphasis>connection status</emphasis>, and information about
+ modes supported on the attached displays.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Framebuffers</term>
+ <listitem>
+ <para><emphasis>Framebuffers</emphasis> are abstract memory objects
+ that provide a source of pixel data to scanout to a CRTC.
+ Applications explicitly request the creation of framebuffers
+ and can control their behavior. Framebuffers rely on the
+          underlying memory manager for low-level memory operations.
+ When creating a framebuffer, applications pass a memory handle
+ through the API which is used as backing storage. The
+ framebuffer itself is only an abstract object with no data. It
+ just refers to memory buffers that must be created with the
+ <citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+ API.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ <refsect2>
+ <title>Mode-Setting</title>
+ <para>Before mode-setting can be performed, an application needs to call
+ <citerefentry><refentrytitle>drmSetMaster</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ to become <emphasis>DRM-Master</emphasis>. It then has exclusive
+ access to the KMS API. A call to
+ <citerefentry><refentrytitle>drmModeGetResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ returns a list of <emphasis>CRTCs</emphasis>,
+ <emphasis>Connectors</emphasis>, <emphasis>Encoders</emphasis> and
+ <emphasis>Planes</emphasis>.</para>
+
+    <para>The normal procedure is as follows: First, you select which connectors
+ you want to use. Users are mostly interested in which monitor or
+ display-panel is active so you need to make sure to arrange them in
+ the correct logical order and select the correct ones to use. For
+ each connector, you need to find a CRTC to drive this connector. If
+ you want to clone output to two or more connectors, you may use a
+ single CRTC for all cloned connectors (if the hardware supports
+ this). To find a suitable CRTC, you need to iterate over the list of
+ encoders that are available for each connector. Each encoder
+ contains a list of CRTCs that it can work with and you simply select
+ one of these CRTCs. If you later program the CRTC to control a
+ connector, it automatically selects the best encoder. However, this
+ procedure is needed so your CRTC has at least one working encoder
+ for the selected connector. See the <emphasis>Examples</emphasis>
+ section below for more information.</para>
+
+ <para>All valid modes for a connector can be retrieved with a call to
+      <citerefentry><refentrytitle>drmModeGetConnector</refentrytitle><manvolnum>3</manvolnum></citerefentry>.
+ You need to select the mode you want to use and save it. The first
+ mode in the list is the default mode with the highest resolution
+ possible and often a suitable choice.</para>
+
+ <para>After you have a working connector+CRTC+mode combination, you need
+ to create a framebuffer that is used for scanout. Memory buffer
+      allocation is driver-dependent and described in
+ <citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>.
+ You need to create a buffer big enough for your selected mode. Now
+ you can create a framebuffer object that uses your memory-buffer as
+ scanout buffer. You can do this with
+ <citerefentry><refentrytitle>drmModeAddFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+      or
+ <citerefentry><refentrytitle>drmModeAddFB2</refentrytitle><manvolnum>3</manvolnum></citerefentry>.</para>
+
+ <para>As a last step, you want to program your CRTC to drive your selected
+ connector. You can do this with a call to
+ <citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>.</para>
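+
+    <para>A minimal sketch of these steps follows. Error handling is omitted
+      and the <function>create_buffer</function> helper is hypothetical,
+      standing in for the driver-dependent allocation described in
+      <citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>:
+
+<programlisting>
+drmModeRes *res = drmModeGetResources(fd);
+drmModeConnector *conn = drmModeGetConnector(fd, res->connectors[0]);
+drmModeModeInfo mode = conn->modes[0];       /* default/highest mode */
+int crtc = modeset_find_crtc(fd, res, conn); /* see Examples below */
+
+/* create_buffer() is a hypothetical helper that allocates a buffer
+ * big enough for the mode and returns its gem-handle and pitch */
+uint32_t handle, pitch, fb;
+create_buffer(fd, mode.hdisplay, mode.vdisplay, &amp;handle, &amp;pitch);
+
+drmModeAddFB(fd, mode.hdisplay, mode.vdisplay, 24, 32, pitch, handle, &amp;fb);
+drmModeSetCrtc(fd, crtc, fb, 0, 0, &amp;conn->connector_id, 1, &amp;mode);
+</programlisting>
+    </para>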
+ </refsect2>
+
+ <refsect2>
+ <title>Page-Flipping</title>
+ <para>A call to
+ <citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ is executed immediately and forces the CRTC to use the new scanout
+      buffer. If you want smooth transitions without tearing, you probably
+      want to use double-buffering. You need to create one framebuffer object for
+ each buffer you use. You can then call
+ <citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ on the next buffer to flip. If you want to synchronize your flips
+ with <emphasis>vertical-blanks</emphasis>, you can use
+ <citerefentry><refentrytitle>drmModePageFlip</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ which schedules your page-flip for the next
+ <emphasis>vblank</emphasis>.</para>
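+
+    <para>A minimal double-buffering sketch, assuming two framebuffer
+      objects <varname>fb[0]</varname> and <varname>fb[1]</varname> were
+      created as described above:
+
+<programlisting>
+int front = 0;
+
+/* render into fb[front ^ 1], then schedule the flip for the next vblank */
+drmModePageFlip(fd, crtc, fb[front ^ 1], DRM_MODE_PAGE_FLIP_EVENT, NULL);
+
+/* consume the flip event before rendering the next frame */
+drmEventContext ev = { .version = DRM_EVENT_CONTEXT_VERSION };
+drmHandleEvent(fd, &amp;ev);
+front ^= 1;
+</programlisting>
+    </para>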
+ </refsect2>
+
+ <refsect2>
+ <title>Planes</title>
+ <para>Planes are controlled independently from CRTCs. That is, a call to
+ <citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ does not affect planes. Instead, you need to call
+ <citerefentry><refentrytitle>drmModeSetPlane</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ to configure a plane. This requires the plane ID, a CRTC, a
+ framebuffer and offsets into the plane-framebuffer and the
+ CRTC-framebuffer. The CRTC then blends the content from the plane
+ over the CRTC framebuffer buffer during scanout. As this does not
+ involve any software-blending, it is way faster than traditional
+ blending. However, plane resources are limited. See
+ <citerefentry><refentrytitle>drmModeGetPlaneResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ for more information.</para>
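+
+    <para>A minimal call sketch, assuming <varname>plane_id</varname> was
+      taken from
+      <citerefentry><refentrytitle>drmModeGetPlaneResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+      and <varname>plane_fb</varname> is a framebuffer for the plane:
+
+<programlisting>
+/* show the whole w x h plane-framebuffer at (x, y) on the CRTC */
+drmModeSetPlane(fd, plane_id, crtc, plane_fb, 0,
+                x, y, w, h,              /* destination on the CRTC */
+                0, 0, w &lt;&lt; 16, h &lt;&lt; 16); /* source, 16.16 fixed point */
+</programlisting>
+    </para>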
+ </refsect2>
+
+ <refsect2>
+ <title>Cursors</title>
+    <para>Similar to planes, much hardware also supports cursors. A cursor is
+ a very small buffer with an image that is blended over the CRTC
+ framebuffer. You can set a different cursor for each CRTC with
+ <citerefentry><refentrytitle>drmModeSetCursor</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ and move it on the screen with
+ <citerefentry><refentrytitle>drmModeMoveCursor</refentrytitle><manvolnum>3</manvolnum></citerefentry>.
+      This allows you to move the cursor on the screen without re-rendering.
+      If hardware cursors are not supported, you need to re-render for each
+      frame the cursor is moved.</para>
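+
+    <para>A minimal sketch, assuming <varname>handle</varname> is a
+      gem-handle to a 64x64 ARGB buffer:
+
+<programlisting>
+drmModeSetCursor(fd, crtc, handle, 64, 64); /* install the cursor image */
+drmModeMoveCursor(fd, crtc, x, y);          /* position it on screen */
+</programlisting>
+    </para>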
+ </refsect2>
+
+ </refsect1>
+
+ <refsect1>
+ <title>Examples</title>
+    <para>The following examples show how basic mode-setting can be done. See
+      the man-page of each DRM function for more information.</para>
+
+ <refsect2>
+ <title>CRTC/Encoder Selection</title>
+ <para>If you retrieved all display configuration information via
+ <citerefentry><refentrytitle>drmModeGetResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ as <structname>drmModeRes</structname> *<varname>res</varname>,
+ selected a connector from the list in
+ <varname>res</varname>-><structfield>connectors</structfield>
+ and retrieved the connector-information as
+ <structname>drmModeConnector</structname> *<varname>conn</varname>
+ via
+ <citerefentry><refentrytitle>drmModeGetConnector</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+      then this example shows how you can find a suitable CRTC id to
+ drive this connector. This function takes a file-descriptor to the
+ DRM device (see
+ <citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>)
+ as <varname>fd</varname>, a pointer to the retrieved resources as
+ <varname>res</varname> and a pointer to the selected connector as
+ <varname>conn</varname>. It returns an integer smaller than 0 on
+      failure; otherwise, a valid CRTC id is returned.</para>
+
+<programlisting>
+static int modeset_find_crtc(int fd, drmModeRes *res, drmModeConnector *conn)
+{
+ drmModeEncoder *enc;
+ unsigned int i, j;
+
+ /* iterate all encoders of this connector */
+ for (i = 0; i &lt; conn->count_encoders; ++i) {
+ enc = drmModeGetEncoder(fd, conn->encoders[i]);
+ if (!enc) {
+ /* cannot retrieve encoder, ignoring... */
+ continue;
+ }
+
+ /* iterate all global CRTCs */
+ for (j = 0; j &lt; res->count_crtcs; ++j) {
+ /* check whether this CRTC works with the encoder */
+ if (!(enc->possible_crtcs &amp; (1 &lt;&lt; j)))
+ continue;
+
+ /* Here you need to check that no other connector
+  * currently uses this CRTC. If you intend to drive one
+  * connector only, you can skip this step. Otherwise, scan
+  * your list of configured connectors and CRTCs to see
+  * whether this CRTC is already used; if it is, continue
+  * the search here. The hypothetical crtc_unused() helper
+  * stands in for that check. */
+ if (crtc_unused(res->crtcs[j])) {
+ drmModeFreeEncoder(enc);
+ return res->crtcs[j];
+ }
+ }
+
+ drmModeFreeEncoder(enc);
+ }
+
+ /* cannot find a suitable CRTC */
+ return -ENOENT;
+}
+</programlisting>
+
+ </refsect2>
+
+ </refsect1>
+
+ <refsect1>
+ <title>Reporting Bugs</title>
+ <para>Bugs in this manual should be reported to
+ http://bugs.freedesktop.org under the "Mesa" product, with "Other" or
+ "libdrm" as the component.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>See Also</title>
+ <para>
+ <citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetConnector</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetEncoder</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeAddFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeAddFB2</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeRmFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModePageFlip</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetPlaneResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetPlane</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeSetPlane</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeSetCursor</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeMoveCursor</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmSetMaster</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmAvailable</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmCheckModesettingSupported</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ </para>
+ </refsect1>
+</refentry>
diff --git a/chromium/third_party/libdrm/src/man/drm-memory.xml b/chromium/third_party/libdrm/src/man/drm-memory.xml
new file mode 100644
index 00000000000..6b4f0759b00
--- /dev/null
+++ b/chromium/third_party/libdrm/src/man/drm-memory.xml
@@ -0,0 +1,430 @@
+<?xml version='1.0'?> <!--*-nxml-*-->
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+
+<!--
+ Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
+ Dedicated to the Public Domain
+-->
+
+<refentry id="drm-memory">
+ <refentryinfo>
+ <title>Direct Rendering Manager</title>
+ <productname>libdrm</productname>
+ <date>September 2012</date>
+ <authorgroup>
+ <author>
+ <contrib>Developer</contrib>
+ <firstname>David</firstname>
+ <surname>Herrmann</surname>
+ <email>dh.herrmann@googlemail.com</email>
+ </author>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>drm-memory</refentrytitle>
+ <manvolnum>7</manvolnum>
+ </refmeta>
+
+ <refnamediv>
+ <refname>drm-memory</refname>
+ <refname>drm-mm</refname>
+ <refname>drm-gem</refname>
+ <refname>drm-ttm</refname>
+ <refpurpose>DRM Memory Management</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para>Many modern high-end GPUs come with their own memory managers. They
+ even include several different caches that need to be synchronized
+ during access. Textures, framebuffers, command buffers and more need
+ to be stored in memory that can be accessed quickly by the GPU.
+ Therefore, memory management on GPUs is highly driver- and
+ hardware-dependent.</para>
+
+ <para>However, there are several frameworks in the kernel that are used by
+ more than one driver. These can be used for trivial mode-setting
+ without requiring driver-dependent code. But for
+ hardware-accelerated rendering you need to read the manual pages for
+ the driver you want to work with.</para>
+
+ <refsect2>
+ <title>Dumb-Buffers</title>
+ <para>Almost all in-kernel DRM hardware drivers support an API called
+      <emphasis>Dumb-Buffers</emphasis>. This API allows you to create buffers
+ of arbitrary size that can be used for scanout. These buffers can be
+ memory mapped via
+ <citerefentry><refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum></citerefentry>
+ so you can render into them on the CPU. However, GPU access to these
+ buffers is often not possible. Therefore, they are fine for simple
+ tasks but not suitable for complex compositions and
+ renderings.</para>
+
+ <para>The <constant>DRM_IOCTL_MODE_CREATE_DUMB</constant> ioctl can be
+ used to create a dumb buffer. The kernel will return a 32bit handle
+ that can be used to manage the buffer with the DRM API. You can
+      create a framebuffer with
+ <citerefentry><refentrytitle>drmModeAddFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ and use it for mode-setting and scanout. To access the buffer, you
+ first need to retrieve the offset of the buffer. The
+ <constant>DRM_IOCTL_MODE_MAP_DUMB</constant> ioctl requests the DRM
+ subsystem to prepare the buffer for memory-mapping and returns a
+ fake-offset that can be used with
+ <citerefentry><refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum></citerefentry>.</para>
+
+ <para>The <constant>DRM_IOCTL_MODE_CREATE_DUMB</constant> ioctl takes as
+ argument a structure of type
+ <structname>struct drm_mode_create_dumb</structname>:
+
+<programlisting>
+struct drm_mode_create_dumb {
+ __u32 height;
+ __u32 width;
+ __u32 bpp;
+ __u32 flags;
+
+ __u32 handle;
+ __u32 pitch;
+ __u64 size;
+};
+</programlisting>
+
+ The fields <structfield>height</structfield>,
+ <structfield>width</structfield>, <structfield>bpp</structfield> and
+ <structfield>flags</structfield> have to be provided by the caller.
+ The other fields are filled by the kernel with the return values.
+ <structfield>height</structfield> and
+ <structfield>width</structfield> are the dimensions of the
+ rectangular buffer that is created. <structfield>bpp</structfield>
+ is the number of bits-per-pixel and must be a multiple of
+ <literal>8</literal>. You most commonly want to pass
+ <literal>32</literal> here. The <structfield>flags</structfield>
+ field is currently unused and must be zeroed. Different flags to
+ modify the behavior may be added in the future. After calling the
+ ioctl, the <structfield>handle</structfield>,
+ <structfield>pitch</structfield> and <structfield>size</structfield>
+ fields are filled by the kernel. <structfield>handle</structfield>
+ is a 32bit gem handle that identifies the buffer. This is used by
+ several other calls that take a gem-handle or memory-buffer as
+ argument. The <structfield>pitch</structfield> field is the
+ pitch (or stride) of the new buffer. Most drivers use 32bit or 64bit
+ aligned stride-values. The <structfield>size</structfield> field
+      contains the absolute size in bytes of the buffer. This can normally
+      also be computed as
+      <emphasis>height * pitch</emphasis>.</para>
+
+ <para>To prepare the buffer for
+ <citerefentry><refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum></citerefentry>
+ you need to use the <constant>DRM_IOCTL_MODE_MAP_DUMB</constant>
+ ioctl. It takes as argument a structure of type
+ <structname>struct drm_mode_map_dumb</structname>:
+
+<programlisting>
+struct drm_mode_map_dumb {
+ __u32 handle;
+ __u32 pad;
+
+ __u64 offset;
+};
+</programlisting>
+
+ You need to put the gem-handle that was previously retrieved via
+ <constant>DRM_IOCTL_MODE_CREATE_DUMB</constant> into the
+ <structfield>handle</structfield> field. The
+ <structfield>pad</structfield> field is unused padding and must be
+ zeroed. After completion, the <structfield>offset</structfield>
+ field will contain an offset that can be used with
+ <citerefentry><refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum></citerefentry>
+ on the DRM file-descriptor.</para>
+
+    <para>When you no longer need your dumb-buffer, you should destroy it
+ with <constant>DRM_IOCTL_MODE_DESTROY_DUMB</constant>. If you close
+ the DRM file-descriptor, all open dumb-buffers are automatically
+ destroyed. This ioctl takes as argument a structure of type
+ <structname>struct drm_mode_destroy_dumb</structname>:
+
+<programlisting>
+struct drm_mode_destroy_dumb {
+ __u32 handle;
+};
+</programlisting>
+
+ You only need to put your handle into the
+ <structfield>handle</structfield> field. After this call, the handle
+ is invalid and may be reused for new buffers by the dumb-API.</para>
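+
+    <para>For instance, reusing the handle returned by
+      <constant>DRM_IOCTL_MODE_CREATE_DUMB</constant>:
+
+<programlisting>
+struct drm_mode_destroy_dumb dreq;
+
+memset(&amp;dreq, 0, sizeof(dreq));
+dreq.handle = handle;
+drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &amp;dreq);
+</programlisting>
+    </para>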
+
+ </refsect2>
+
+ <refsect2>
+ <title>TTM</title>
+ <para><emphasis>TTM</emphasis> stands for
+ <emphasis>Translation Table Manager</emphasis> and is a generic
+ memory-manager provided by the kernel. It does not provide a common
+ user-space API so you need to look at each driver interface if you
+ want to use it. See for instance the radeon manpages for more
+ information on memory-management with radeon and TTM.</para>
+ </refsect2>
+
+ <refsect2>
+ <title>GEM</title>
+ <para><emphasis>GEM</emphasis> stands for
+ <emphasis>Graphics Execution Manager</emphasis> and is a generic DRM
+          memory-management framework in the kernel that is used by many
+ different drivers. Gem is designed to manage graphics memory,
+ control access to the graphics device execution context and handle
+          the essentially NUMA environment unique to modern graphics hardware. Gem
+ allows multiple applications to share graphics device resources
+ without the need to constantly reload the entire graphics card. Data
+ may be shared between multiple applications with gem ensuring that
+ the correct memory synchronization occurs.</para>
+
+ <para>Gem provides simple mechanisms to manage graphics data and control
+ execution flow within the linux DRM subsystem. However, gem is not a
+          complete framework that is fully driver-independent. Instead, it
+          provides many functions that are shared between many drivers, but
+ each driver has to implement most of memory-management with
+ driver-dependent ioctls. This manpage tries to describe the
+          semantics (and, where it applies, the syntax) that are shared between all
+ drivers that use gem.</para>
+
+ <para>All GEM APIs are defined as
+ <citerefentry><refentrytitle>ioctl</refentrytitle><manvolnum>2</manvolnum></citerefentry>
+ on the DRM file descriptor. An application must be authorized via
+ <citerefentry><refentrytitle>drmAuthMagic</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ to the current DRM-Master to access the GEM subsystem. A driver that
+ does not support gem will return <constant>ENODEV</constant> for all
+ these ioctls. Invalid object handles return
+ <constant>EINVAL</constant> and invalid object names return
+ <constant>ENOENT</constant>.</para>
+
+ <para>Gem provides explicit memory management primitives. System pages are
+ allocated when the object is created, either as the fundamental
+ storage for hardware where system memory is used by the graphics
+ processor directly, or as backing store for graphics-processor
+ resident memory.</para>
+
+ <para>Objects are referenced from user-space using handles. These are, for
+ all intents and purposes, equivalent to file descriptors but avoid
+ the overhead. Newer kernel drivers also support the
+ <citerefentry><refentrytitle>drm-prime</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+          infrastructure, which can return real file-descriptors for gem-handles
+ using the linux dma-buf API. Objects may be published with a name so
+ that other applications and processes can access them. The name
+ remains valid as long as the object exists. Gem-objects are
+ reference counted in the kernel. The object is only destroyed when
+          all handles from user-space have been closed.</para>
+
+ <para>Gem-buffers cannot be created with a generic API. Each driver
+ provides its own API to create gem-buffers. See for example
+ <constant>DRM_I915_GEM_CREATE</constant>,
+ <constant>DRM_NOUVEAU_GEM_NEW</constant> or
+ <constant>DRM_RADEON_GEM_CREATE</constant>. Each of these ioctls
+ returns a gem-handle that can be passed to different generic ioctls.
+ The <emphasis>libgbm</emphasis> library from the
+ <emphasis>mesa3D</emphasis> distribution tries to provide a
+ driver-independent API to create gbm buffers and retrieve a
+          gbm-handle to them. It allows you to create buffers for different
+          use-cases, including scanout, rendering, cursors and CPU-access. See
+ the libgbm library for more information or look at the
+ driver-dependent man-pages (for example
+ <citerefentry><refentrytitle>drm-intel</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+ or
+ <citerefentry><refentrytitle>drm-radeon</refentrytitle><manvolnum>7</manvolnum></citerefentry>).</para>
+
+ <para>Gem-buffers can be closed with the
+ <constant>DRM_IOCTL_GEM_CLOSE</constant> ioctl. It takes as argument
+ a structure of type <structname>struct drm_gem_close</structname>:
+
+<programlisting>
+struct drm_gem_close {
+ __u32 handle;
+ __u32 pad;
+};
+</programlisting>
+
+ The <structfield>handle</structfield> field is the gem-handle to be
+ closed. The <structfield>pad</structfield> field is unused padding.
+ It must be zeroed. After this call the gem handle cannot be used by
+ this process anymore and may be reused for new gem objects by the
+ gem API.</para>
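+
+      <para>For instance:
+
+<programlisting>
+struct drm_gem_close arg;
+
+memset(&amp;arg, 0, sizeof(arg));
+arg.handle = handle;
+drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &amp;arg);
+</programlisting>
+      </para>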
+
+ <para>If you want to share gem-objects between different processes, you
+ can create a name for them and pass this name to other processes
+ which can then open this gem-object. Names are currently 32bit
+ integer IDs and have no special protection. That is, if you put a
+ name on your gem-object, every other client that has access to the
+ DRM device and is authenticated via
+ <citerefentry><refentrytitle>drmAuthMagic</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ to the current DRM-Master, can <emphasis>guess</emphasis> the name
+ and open or access the gem-object. If you want more fine-grained
+ access control, you can use the new
+ <citerefentry><refentrytitle>drm-prime</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+ API to retrieve file-descriptors for gem-handles. To create a name
+ for a gem-handle, you use the
+ <constant>DRM_IOCTL_GEM_FLINK</constant> ioctl. It takes as argument
+ a structure of type <structname>struct drm_gem_flink</structname>:
+
+<programlisting>
+struct drm_gem_flink {
+ __u32 handle;
+ __u32 name;
+};
+</programlisting>
+
+ You have to put your handle into the
+ <structfield>handle</structfield> field. After completion, the
+ kernel has put the new unique name into the
+ <structfield>name</structfield> field. You can now pass this name to
+ other processes which can then import the name with the
+ <constant>DRM_IOCTL_GEM_OPEN</constant> ioctl. It takes as argument
+ a structure of type <structname>struct drm_gem_open</structname>:
+
+<programlisting>
+struct drm_gem_open {
+ __u32 name;
+
+ __u32 handle;
+ __u32 size;
+};
+</programlisting>
+
+ You have to fill in the <structfield>name</structfield> field with
+ the name of the gem-object that you want to open. The kernel will
+ fill in the <structfield>handle</structfield> and
+ <structfield>size</structfield> fields with the new handle and size
+ of the gem-object. You can now access the gem-object via the handle
+ as if you created it with the gem API.</para>
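+
+      <para>Putting both together, with error handling omitted and the
+        transport of the name between the processes left open:
+
+<programlisting>
+/* exporting process: publish a name for the buffer */
+struct drm_gem_flink flink;
+memset(&amp;flink, 0, sizeof(flink));
+flink.handle = handle;
+drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &amp;flink);
+/* pass flink.name to the other process, e.g. over a socket */
+
+/* importing process: turn the received name into a local handle */
+struct drm_gem_open op;
+memset(&amp;op, 0, sizeof(op));
+op.name = name;
+drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &amp;op);
+/* op.handle and op.size are now valid */
+</programlisting>
+      </para>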
+
+ <para>Besides generic buffer management, the GEM API does not provide any
+ generic access. Each driver implements its own functionality on top
+ of this API. This includes execution-buffers, GTT management,
+ context creation, CPU access, GPU I/O and more. The next
+ higher-level API is <emphasis>OpenGL</emphasis>. So if you want to
+ use more GPU features, you should use the
+ <emphasis>mesa3D</emphasis> library to create OpenGL contexts on DRM
+ devices. This does <emphasis>not</emphasis> require any
+ windowing-system like X11, but can also be done on raw DRM devices.
+ However, this is beyond the scope of this man-page. You may have a
+ look at other mesa3D manpages, including libgbm and libEGL. 2D
+ software-rendering (rendering with the CPU) can be achieved with the
+ dumb-buffer-API in a driver-independent fashion, however, for
+ hardware-accelerated 2D or 3D rendering you must use OpenGL. Any
+ other API that tries to abstract the driver-internals to access
+          GEM-execution-buffers and other GPU internals would simply reinvent
+          OpenGL, so it is not provided. But if you need more detailed
+ information for a specific driver, you may have a look into the
+ driver-manpages, including
+ <citerefentry><refentrytitle>drm-intel</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-radeon</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+ and
+ <citerefentry><refentrytitle>drm-nouveau</refentrytitle><manvolnum>7</manvolnum></citerefentry>.
+ However, the
+ <citerefentry><refentrytitle>drm-prime</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+ infrastructure and the generic gem API as described here allow
+ display-managers to handle graphics-buffers and render-clients
+ without any deeper knowledge of the GPU that is used. Moreover, it
+          allows moving objects between GPUs and implementing complex
+ display-servers that don't do any rendering on their own. See its
+ man-page for more information.</para>
+ </refsect2>
+ </refsect1>
+
+ <refsect1>
+ <title>Examples</title>
+ <para>This section includes examples for basic memory-management
+ tasks.</para>
+
+ <refsect2>
+ <title>Dumb-Buffers</title>
+      <para>This example shows how to create a dumb-buffer via the generic
+ DRM API. This is driver-independent (as long as the driver
+ supports dumb-buffers) and provides memory-mapped buffers that can
+ be used for scanout. This example creates a full-HD 1920x1080
+ buffer with 32 bits-per-pixel and a color-depth of 24 bits. The
+ buffer is then bound to a framebuffer which can be used for
+ scanout with the KMS API (see
+ <citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>).</para>
+
+<programlisting>
+struct drm_mode_create_dumb creq;
+struct drm_mode_destroy_dumb dreq;
+struct drm_mode_map_dumb mreq;
+uint32_t fb;
+int ret;
+void *map;
+
+/* create dumb buffer */
+memset(&amp;creq, 0, sizeof(creq));
+creq.width = 1920;
+creq.height = 1080;
+creq.bpp = 32;
+ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &amp;creq);
+if (ret &lt; 0) {
+ /* buffer creation failed; see "errno" for more error codes */
+ ...
+}
+/* creq.pitch, creq.handle and creq.size are filled by this ioctl with
+ * the requested values and can be used now. */
+
+/* create framebuffer object for the dumb-buffer */
+ret = drmModeAddFB(fd, 1920, 1080, 24, 32, creq.pitch, creq.handle, &amp;fb);
+if (ret) {
+ /* frame buffer creation failed; see "errno" */
+ ...
+}
+/* the framebuffer "fb" can now used for scanout with KMS */
+
+/* prepare buffer for memory mapping */
+memset(&amp;mreq, 0, sizeof(mreq));
+mreq.handle = creq.handle;
+ret = drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &amp;mreq);
+if (ret) {
+ /* DRM buffer preparation failed; see "errno" */
+ ...
+}
+/* mreq.offset now contains the new offset that can be used with mmap() */
+
+/* perform actual memory mapping */
+map = mmap(0, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mreq.offset);
+if (map == MAP_FAILED) {
+ /* memory-mapping failed; see "errno" */
+ ...
+}
+
+/* clear the framebuffer to 0 */
+memset(map, 0, creq.size);
+</programlisting>
+
+ </refsect2>
+
+ </refsect1>
+
+ <refsect1>
+ <title>Reporting Bugs</title>
+ <para>Bugs in this manual should be reported to
+ http://bugs.freedesktop.org under the "Mesa" product, with "Other" or
+ "libdrm" as the component.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>See Also</title>
+ <para>
+ <citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-prime</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmAvailable</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-intel</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-radeon</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-nouveau</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+ </para>
+ </refsect1>
+</refentry>
diff --git a/chromium/third_party/libdrm/src/man/drm.xml b/chromium/third_party/libdrm/src/man/drm.xml
new file mode 100644
index 00000000000..5a49fe13e20
--- /dev/null
+++ b/chromium/third_party/libdrm/src/man/drm.xml
@@ -0,0 +1,137 @@
+<?xml version='1.0'?> <!--*-nxml-*-->
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+
+<!--
+ Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
+ Dedicated to the Public Domain
+-->
+
+<refentry id="drm">
+ <refentryinfo>
+ <title>Direct Rendering Manager</title>
+ <productname>libdrm</productname>
+ <date>September 2012</date>
+ <authorgroup>
+ <author>
+ <contrib>Developer</contrib>
+ <firstname>David</firstname>
+ <surname>Herrmann</surname>
+ <email>dh.herrmann@googlemail.com</email>
+ </author>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>drm</refentrytitle>
+ <manvolnum>7</manvolnum>
+ </refmeta>
+
+ <refnamediv>
+ <refname>drm</refname>
+ <refpurpose>Direct Rendering Manager</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para>The <emphasis>Direct Rendering Manager</emphasis> (DRM) is a framework
+ to manage <emphasis>Graphics Processing Units</emphasis> (GPUs). It is
+ designed to support the needs of complex graphics devices, usually
+ containing programmable pipelines well suited to 3D graphics
+ acceleration. Furthermore, it is responsible for memory management,
+ interrupt handling and DMA to provide a uniform interface to
+ applications.</para>
+
+ <para>In earlier days, the kernel framework was solely used to provide raw
+      hardware access to privileged user-space processes which implemented
+      all the hardware abstraction layers. But more and more tasks were
+      moved into the kernel. All these interfaces are based on
+ <citerefentry><refentrytitle>ioctl</refentrytitle><manvolnum>2</manvolnum></citerefentry>
+ commands on the DRM character device. The <emphasis>libdrm</emphasis>
+ library provides wrappers for these system-calls and many helpers to
+ simplify the API.</para>
+
+ <para>When a GPU is detected, the DRM system loads a driver for the detected
+ hardware type. Each connected GPU is then presented to user-space via
+ a character-device that is usually available as
+ <filename>/dev/dri/card0</filename> and can be accessed with
+ <citerefentry><refentrytitle>open</refentrytitle><manvolnum>2</manvolnum></citerefentry>
+ and
+ <citerefentry><refentrytitle>close</refentrytitle><manvolnum>2</manvolnum></citerefentry>.
+      However, it still depends on the graphics driver which interfaces are
+ available on these devices. If an interface is not available, the
+ syscalls will fail with <literal>EINVAL</literal>.</para>
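+
+    <para>Opening a device is therefore plain file I/O; a minimal sketch:
+
+<programlisting>
+#include &lt;fcntl.h&gt;
+#include &lt;unistd.h&gt;
+
+int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
+if (fd &lt; 0) {
+        /* no device or no access; see "errno" */
+}
+/* ... use the DRM interfaces on "fd" ... */
+close(fd);
+</programlisting>
+    </para>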
+
+ <refsect2>
+ <title>Authentication</title>
+ <para>All DRM devices provide authentication mechanisms. Only a DRM-Master
+ is allowed to perform mode-setting or modify core state and only one
+ user can be DRM-Master at a time. See
+ <citerefentry><refentrytitle>drmSetMaster</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ for information on how to become DRM-Master and what the limitations
+ are. Other DRM users can be authenticated to the DRM-Master via
+ <citerefentry><refentrytitle>drmAuthMagic</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ so they can perform buffer allocations and rendering.</para>
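+
+      <para>In outline, the handshake looks as follows
+        (<varname>client_fd</varname> is the client's own open
+        file-descriptor to the same device):
+
+<programlisting>
+/* client: obtain a magic token and send it to the DRM-Master */
+drm_magic_t magic;
+drmGetMagic(client_fd, &amp;magic);
+
+/* DRM-Master (e.g. the display server): authenticate that token */
+drmSetMaster(fd);
+drmAuthMagic(fd, magic);
+</programlisting>
+      </para>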
+ </refsect2>
+
+ <refsect2>
+ <title>Mode-Setting</title>
+ <para>Managing connected monitors and displays and changing the current
+ modes is called <emphasis>Mode-Setting</emphasis>. This is
+ restricted to the current DRM-Master. Historically, this was
+ implemented in user-space, but new DRM drivers implement a kernel
+ interface to perform mode-setting called
+ <emphasis>Kernel Mode Setting</emphasis> (KMS). If your
+ hardware-driver supports it, you can use the KMS API provided by
+ DRM. This includes allocating framebuffers, selecting modes and
+ managing CRTCs and encoders. See
+ <citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+ for more.</para>
+ </refsect2>
+
+ <refsect2>
+ <title>Memory Management</title>
+    <para>One of the most sophisticated tasks for GPUs today is managing memory
+          objects. Textures, framebuffers, command-buffers and all other kinds
+          of data for the GPU have to be stored in memory. The DRM driver
+ takes care of managing all memory objects, flushing caches,
+ synchronizing access and providing CPU access to GPU memory. All
+ memory management is hardware driver dependent. However, two generic
+ frameworks are available that are used by most DRM drivers. These
+ are the <emphasis>Translation Table Manager</emphasis> (TTM) and the
+ <emphasis>Graphics Execution Manager</emphasis> (GEM). They provide
+ generic APIs to create, destroy and access buffers from user-space.
+          However, there are still many differences between the drivers so
+          driver-dependent code is still needed. Many helpers are provided in
+ <emphasis>libgbm</emphasis> (Graphics Buffer Manager) from the
+ <emphasis>mesa-project</emphasis>. For more information on DRM
+ memory-management, see
+ <citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>.</para>
+ </refsect2>
+ </refsect1>
+
+ <refsect1>
+ <title>Reporting Bugs</title>
+ <para>Bugs in this manual should be reported to
+ http://bugs.freedesktop.org under the "Mesa" product, with "Other" or
+ "libdrm" as the component.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>See Also</title>
+ <para>
+ <citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmSetMaster</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmAuthMagic</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmAvailable</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ </para>
+ </refsect1>
+</refentry>
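As a minimal sketch of the flow this page describes — opening the card node and obtaining an authentication token for the current DRM-Master — consider the following; it assumes a primary node at /dev/dri/card0 and linking against libdrm, and error handling is abbreviated:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	/* Open the character device exposed by the DRM driver. */
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	drm_magic_t magic;

	if (fd < 0) {
		perror("open /dev/dri/card0");
		return 1;
	}

	/* A non-master client fetches a magic token; the DRM-Master then
	 * authenticates this client by passing the token to drmAuthMagic(3). */
	if (drmGetMagic(fd, &magic) == 0)
		printf("magic token: %u\n", magic);

	close(fd);
	return 0;
}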
diff --git a/chromium/third_party/libdrm/src/man/drmAvailable.xml b/chromium/third_party/libdrm/src/man/drmAvailable.xml
new file mode 100644
index 00000000000..55bef94afe2
--- /dev/null
+++ b/chromium/third_party/libdrm/src/man/drmAvailable.xml
@@ -0,0 +1,75 @@
+<?xml version='1.0'?> <!--*-nxml-*-->
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+
+<!--
+ Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
+ Dedicated to the Public Domain
+-->
+
+<refentry id="drmAvailable">
+ <refentryinfo>
+ <title>Direct Rendering Manager</title>
+ <productname>libdrm</productname>
+ <date>September 2012</date>
+ <authorgroup>
+ <author>
+ <contrib>Developer</contrib>
+ <firstname>David</firstname>
+ <surname>Herrmann</surname>
+ <email>dh.herrmann@googlemail.com</email>
+ </author>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>drmAvailable</refentrytitle>
+ <manvolnum>3</manvolnum>
+ </refmeta>
+
+ <refnamediv>
+ <refname>drmAvailable</refname>
+ <refpurpose>determine whether a DRM kernel driver has been
+ loaded</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+
+ <funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
+
+ <funcprototype>
+ <funcdef>int <function>drmAvailable</function></funcdef>
+ <paramdef>void</paramdef>
+ </funcprototype>
+
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para><function>drmAvailable</function> allows the caller to determine
+ whether a kernel DRM driver is loaded.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>Return Value</title>
+ <para><function>drmAvailable</function> returns 1 if a DRM driver is
+ currently loaded. Otherwise 0 is returned.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>Reporting Bugs</title>
+ <para>Bugs in this function should be reported to
+ http://bugs.freedesktop.org under the "Mesa" product, with "Other" or
+ "libdrm" as the component.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>See Also</title>
+ <para>
+ <citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ </para>
+ </refsect1>
+</refentry>
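A trivial caller of drmAvailable(3), as a sketch:

#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
	/* drmAvailable() returns 1 if a DRM kernel driver is loaded. */
	if (drmAvailable())
		printf("a DRM kernel driver is loaded\n");
	else
		printf("no DRM kernel driver is loaded\n");
	return 0;
}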
diff --git a/chromium/third_party/libdrm/src/man/drmHandleEvent.xml b/chromium/third_party/libdrm/src/man/drmHandleEvent.xml
new file mode 100644
index 00000000000..b1006e5141d
--- /dev/null
+++ b/chromium/third_party/libdrm/src/man/drmHandleEvent.xml
@@ -0,0 +1,102 @@
+<?xml version='1.0'?> <!--*-nxml-*-->
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+
+<!--
+ Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
+ Dedicated to the Public Domain
+-->
+
+<refentry id="drmHandleEvent">
+ <refentryinfo>
+ <title>Direct Rendering Manager</title>
+ <productname>libdrm</productname>
+ <date>September 2012</date>
+ <authorgroup>
+ <author>
+ <contrib>Developer</contrib>
+ <firstname>David</firstname>
+ <surname>Herrmann</surname>
+ <email>dh.herrmann@googlemail.com</email>
+ </author>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>drmHandleEvent</refentrytitle>
+ <manvolnum>3</manvolnum>
+ </refmeta>
+
+ <refnamediv>
+ <refname>drmHandleEvent</refname>
+ <refpurpose>read and process pending DRM events</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+
+ <funcsynopsis>
+ <funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
+
+ <funcprototype>
+ <funcdef>int <function>drmHandleEvent</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>drmEventContextPtr <parameter>evctx</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para><function>drmHandleEvent</function> processes outstanding DRM events
+ on the DRM file-descriptor passed as <parameter>fd</parameter>. This
+ function should be called after the DRM file-descriptor has polled
+ readable; it will read the events and use the passed-in
+ <parameter>evctx</parameter> structure to call function pointers
+ with the parameters noted below:
+
+<programlisting>
+typedef struct _drmEventContext {
+ int version;
+ void (*vblank_handler) (int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+                        void *user_data);
+ void (*page_flip_handler) (int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+                        void *user_data);
+} drmEventContext, *drmEventContextPtr;
+</programlisting>
+
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>Return Value</title>
+ <para><function>drmHandleEvent</function> returns <literal>0</literal> on
+ success, or if there is no data to read from the file-descriptor.
+ Returns <literal>-1</literal> if the read on the file-descriptor fails
+ or returns less than a full event record.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>Reporting Bugs</title>
+ <para>Bugs in this function should be reported to
+ http://bugs.freedesktop.org under the "Mesa" product, with "Other" or
+ "libdrm" as the component.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>See Also</title>
+ <para>
+ <citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModePageFlip</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmWaitVBlank</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ </para>
+ </refsect1>
+</refentry>
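A sketch of the intended usage — poll(2) on the DRM file-descriptor, then dispatch via drmHandleEvent(3); the handler and helper names are illustrative:

#include <poll.h>
#include <stdio.h>
#include <xf86drm.h>

static void page_flip_handler(int fd, unsigned int sequence,
                              unsigned int tv_sec, unsigned int tv_usec,
                              void *user_data)
{
	printf("page flip %u completed at %u.%06u\n", sequence, tv_sec, tv_usec);
}

/* Block until the next pending DRM event on fd has been dispatched. */
static int wait_for_event(int fd)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = page_flip_handler,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	/* Reads the pending event record(s) and invokes the handlers. */
	return drmHandleEvent(fd, &evctx);
}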
diff --git a/chromium/third_party/libdrm/src/man/drmModeGetResources.xml b/chromium/third_party/libdrm/src/man/drmModeGetResources.xml
new file mode 100644
index 00000000000..2f5e8c2c53e
--- /dev/null
+++ b/chromium/third_party/libdrm/src/man/drmModeGetResources.xml
@@ -0,0 +1,139 @@
+<?xml version='1.0'?> <!--*-nxml-*-->
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+
+<!--
+ Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
+ Dedicated to the Public Domain
+-->
+
+<refentry id="drmModeGetResources">
+ <refentryinfo>
+ <title>Direct Rendering Manager</title>
+ <productname>libdrm</productname>
+ <date>September 2012</date>
+ <authorgroup>
+ <author>
+ <contrib>Developer</contrib>
+ <firstname>David</firstname>
+ <surname>Herrmann</surname>
+ <email>dh.herrmann@googlemail.com</email>
+ </author>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>drmModeGetResources</refentrytitle>
+ <manvolnum>3</manvolnum>
+ </refmeta>
+
+ <refnamediv>
+ <refname>drmModeGetResources</refname>
+ <refpurpose>retrieve current display configuration information</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+
+ <funcsynopsis>
+ <funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
+ <funcsynopsisinfo>#include &lt;xf86drmMode.h&gt;</funcsynopsisinfo>
+
+ <funcprototype>
+ <funcdef>drmModeResPtr <function>drmModeGetResources</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para><function>drmModeGetResources</function> allocates, populates, and
+ returns a <structname>drmModeRes</structname> structure containing
+ information about the current display configuration. The structure
+ contains the following fields:
+
+<programlisting>
+typedef struct _drmModeRes {
+ int count_fbs;
+ uint32_t *fbs;
+
+ int count_crtcs;
+ uint32_t *crtcs;
+
+ int count_connectors;
+ uint32_t *connectors;
+
+ int count_encoders;
+ uint32_t *encoders;
+
+ uint32_t min_width, max_width;
+ uint32_t min_height, max_height;
+} drmModeRes, *drmModeResPtr;
+</programlisting>
+
+ </para>
+
+    <para>The <structfield>count_fbs</structfield> and
+          <structfield>fbs</structfield> fields list the currently allocated
+          framebuffer objects (i.e., objects that can be attached to a given
+          CRTC or sprite for display).</para>
+
+ <para>The <structfield>count_crtcs</structfield> and
+ <structfield>crtcs</structfield> fields list the available CRTCs in
+ the configuration. A CRTC is simply an object that can scan out a
+ framebuffer to a display sink, and contains mode timing and relative
+ position information. CRTCs drive encoders, which are responsible for
+ converting the pixel stream into a specific display protocol (e.g.,
+ MIPI or HDMI).</para>
+
+ <para>The <structfield>count_connectors</structfield> and
+ <structfield>connectors</structfield> fields list the available
+ physical connectors on the system. Note that some of these may not be
+ exposed from the chassis (e.g., LVDS or eDP). Connectors are attached
+ to encoders and contain information about the attached display sink
+ (e.g., width and height in mm, subpixel ordering, and various other
+ properties).</para>
+
+ <para>The <structfield>count_encoders</structfield> and
+ <structfield>encoders</structfield> fields list the available encoders
+ on the device. Each encoder may be associated with a CRTC, and may be
+          used to drive a particular connector.</para>
+
+ <para>The <structfield>min*</structfield> and
+          <structfield>max*</structfield> fields indicate the minimum and maximum
+          sizes of a framebuffer for this device (i.e., the scanout size limits).</para>
+ </refsect1>
+
+ <refsect1>
+ <title>Return Value</title>
+ <para><function>drmModeGetResources</function> returns a drmModeRes
+          structure pointer on success, or <literal>NULL</literal> on failure. The
+ returned structure must be freed with
+ <citerefentry><refentrytitle>drmModeFreeResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>Reporting Bugs</title>
+ <para>Bugs in this function should be reported to
+ http://bugs.freedesktop.org under the "Mesa" product, with "Other" or
+ "libdrm" as the component.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>See Also</title>
+ <para>
+ <citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeAddFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeAddFB2</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeRmFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeDirtyFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetEncoder</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>drmModeGetConnector</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ </para>
+ </refsect1>
+</refentry>
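For illustration, a sketch that enumerates the resources described above on an already-open DRM file-descriptor and then releases them:

#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int dump_resources(int fd)
{
	drmModeResPtr res = drmModeGetResources(fd);
	int i;

	if (!res)
		return -1;

	for (i = 0; i < res->count_connectors; i++)
		printf("connector id %u\n", res->connectors[i]);
	for (i = 0; i < res->count_crtcs; i++)
		printf("crtc id %u\n", res->crtcs[i]);
	printf("framebuffer size: %ux%u to %ux%u\n",
	       res->min_width, res->min_height,
	       res->max_width, res->max_height);

	/* The returned structure must be freed by the caller. */
	drmModeFreeResources(res);
	return 0;
}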
diff --git a/chromium/third_party/libdrm/src/mediatek/Android.mk b/chromium/third_party/libdrm/src/mediatek/Android.mk
new file mode 100644
index 00000000000..c9ac9f102d0
--- /dev/null
+++ b/chromium/third_party/libdrm/src/mediatek/Android.mk
@@ -0,0 +1,28 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libdrm_mediatek
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES := mediatek_drm.c
+LOCAL_EXPORT_C_INCLUDE_DIRS += \
+ $(LOCAL_PATH)/mediatek
+
+LOCAL_C_INCLUDES := \
+ $(LIBDRM_TOP) \
+ $(LIBDRM_TOP)/mediatek \
+ $(LIBDRM_TOP)/include/drm
+
+# This makes sure mmap64 is used, as mmap offsets can be larger than 32-bit
+LOCAL_CFLAGS := \
+ -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1 \
+ -D_LARGEFILE_SOURCE \
+ -D_LARGEFILE64_SOURCE \
+ -D_FILE_OFFSET_BITS=64
+
+LOCAL_COPY_HEADERS := mediatek_drm.h mediatek_drmif.h
+LOCAL_COPY_HEADERS_TO := libdrm
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/chromium/third_party/libdrm/src/mediatek/Makefile.am b/chromium/third_party/libdrm/src/mediatek/Makefile.am
new file mode 100644
index 00000000000..bb23e46677c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/mediatek/Makefile.am
@@ -0,0 +1,20 @@
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/mediatek \
+ $(PTHREADSTUBS_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_mediatek_la_LTLIBRARIES = libdrm_mediatek.la
+libdrm_mediatek_ladir = $(libdir)
+libdrm_mediatek_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libdrm_mediatek_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
+
+libdrm_mediatek_la_SOURCES = \
+ mediatek_drm.c
+
+libdrm_mediatekincludedir = ${includedir}/libdrm
+libdrm_mediatekinclude_HEADERS = mediatek_drmif.h mediatek_drm.h
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm_mediatek.pc
diff --git a/chromium/third_party/libdrm/src/mediatek/libdrm_mediatek.pc.in b/chromium/third_party/libdrm/src/mediatek/libdrm_mediatek.pc.in
new file mode 100644
index 00000000000..eaba97e372e
--- /dev/null
+++ b/chromium/third_party/libdrm/src/mediatek/libdrm_mediatek.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_mediatek
+Description: Userspace interface to mediatek kernel DRM services
+Version: 0.1
+Libs: -L${libdir} -ldrm_mediatek
+Cflags: -I${includedir} -I${includedir}/libdrm
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/mediatek/mediatek_drm.c b/chromium/third_party/libdrm/src/mediatek/mediatek_drm.c
new file mode 100644
index 00000000000..1122b03cde0
--- /dev/null
+++ b/chromium/third_party/libdrm/src/mediatek/mediatek_drm.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: JB TSAI <jb.tsai@mediatek.com>
+ *
+ * based on rockchip_drm.c
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <sys/mman.h>
+#include <linux/stddef.h>
+
+#include <xf86drm.h>
+
+#include "mediatek_drm.h"
+#include "mediatek_drmif.h"
+
+/*
+ * Create mediatek drm device object.
+ *
+ * @fd: file descriptor of the opened mediatek drm driver.
+ *
+ * Returns the created device object on success, otherwise NULL.
+ */
+struct mediatek_device *mediatek_device_create(int fd)
+{
+ struct mediatek_device *dev;
+
+ dev = calloc(1, sizeof(*dev));
+ if (!dev) {
+ fprintf(stderr, "failed to create device[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ dev->fd = fd;
+
+ return dev;
+}
+
+/*
+ * Destroy mediatek drm device object
+ *
+ * @dev: mediatek drm device object.
+ */
+void mediatek_device_destroy(struct mediatek_device *dev)
+{
+ free(dev);
+}
+
+/*
+ * Create a mediatek buffer object to mediatek drm device.
+ *
+ * @dev: mediatek drm device object.
+ * @size: user-desired size.
+ * @flags: user-desired memory type.
+ *	the user can set one or more types among the available memory
+ *	allocation and cache attribute types. By default, the
+ *	MEDIATEK_BO_NONCONTIG and MEDIATEK_BO_NONCACHABLE types would
+ *	be used.
+ *
+ * Returns a mediatek buffer object on success, otherwise NULL.
+ */
+struct mediatek_bo *mediatek_bo_create(struct mediatek_device *dev,
+ size_t size, uint32_t flags)
+{
+ struct mediatek_bo *bo;
+ struct drm_mtk_gem_create req = {
+ .size = size,
+ .flags = flags,
+ };
+
+ if (size == 0) {
+ fprintf(stderr, "invalid size.\n");
+ return NULL;
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo) {
+ fprintf(stderr, "failed to create bo[%s].\n",
+ strerror(errno));
+ goto fail;
+ }
+
+ bo->dev = dev;
+
+ if (drmIoctl(dev->fd, DRM_IOCTL_MTK_GEM_CREATE, &req)){
+ fprintf(stderr, "failed to create gem object[%s].\n",
+ strerror(errno));
+ goto err_free_bo;
+ }
+
+ bo->handle = req.handle;
+ bo->size = size;
+ bo->flags = flags;
+
+ return bo;
+
+err_free_bo:
+ free(bo);
+fail:
+ return NULL;
+}
+
+struct mediatek_bo *mediatek_bo_from_handle(struct mediatek_device *dev,
+ uint32_t handle, uint32_t flags, uint32_t size)
+{
+ struct mediatek_bo *bo;
+
+ if (size == 0) {
+ fprintf(stderr, "invalid size.\n");
+ return NULL;
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo) {
+ fprintf(stderr, "failed to create bo[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ bo->dev = dev;
+ bo->handle = handle;
+ bo->size = size;
+ bo->flags = flags;
+
+ return bo;
+}
+
+/*
+ * Destroy a mediatek buffer object.
+ *
+ * @bo: a mediatek buffer object to be destroyed.
+ */
+void mediatek_bo_destroy(struct mediatek_bo *bo)
+{
+ if (!bo)
+ return;
+
+ if (bo->vaddr)
+ munmap(bo->vaddr, bo->size);
+
+ if (bo->handle) {
+ struct drm_gem_close req = {
+ .handle = bo->handle,
+ };
+
+ drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ }
+
+ free(bo);
+}
+
+
+/*
+ * Get a mediatek buffer object from a gem global object name.
+ *
+ * @dev: a mediatek device object.
+ * @name: a gem global object name exported by another process.
+ *
+ * this interface is used to get a mediatek buffer object from a gem
+ * global object name sent by another process for buffer sharing.
+ *
+ * Returns a mediatek buffer object on success, otherwise NULL.
+ *
+ */
+struct mediatek_bo *mediatek_bo_from_name(struct mediatek_device *dev,
+ uint32_t name)
+{
+ struct mediatek_bo *bo;
+ struct drm_gem_open req = {
+ .name = name,
+ };
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo) {
+ fprintf(stderr, "failed to allocate bo[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
+ fprintf(stderr, "failed to open gem object[%s].\n",
+ strerror(errno));
+ goto err_free_bo;
+ }
+
+ bo->dev = dev;
+ bo->name = name;
+ bo->handle = req.handle;
+
+ return bo;
+
+err_free_bo:
+ free(bo);
+ return NULL;
+}
+
+/*
+ * Get a gem global object name from a gem object handle.
+ *
+ * @bo: a mediatek buffer object including gem handle.
+ * @name: a gem global object name to be retrieved from the kernel driver.
+ *
+ * this interface is used to get a gem global object name from a gem object
+ * handle for a buffer that is to be shared with another process.
+ *
+ * Returns 0 on success, otherwise a negative error code.
+ */
+int mediatek_bo_get_name(struct mediatek_bo *bo, uint32_t *name)
+{
+ if (!bo->name) {
+ struct drm_gem_flink req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
+ if (ret) {
+ fprintf(stderr, "failed to get gem global name[%s].\n",
+ strerror(errno));
+ return ret;
+ }
+
+ bo->name = req.name;
+ }
+
+ *name = bo->name;
+
+ return 0;
+}
+
+uint32_t mediatek_bo_handle(struct mediatek_bo *bo)
+{
+ return bo->handle;
+}
+
+/*
+ * Mmap a buffer to user space.
+ *
+ * @bo: a mediatek buffer object including a gem object handle to be mapped
+ *	to user space.
+ *
+ * Returns the mapped user-space pointer on success, otherwise NULL.
+ */
+void *mediatek_bo_map(struct mediatek_bo *bo)
+{
+ if (!bo->vaddr) {
+ struct mediatek_device *dev = bo->dev;
+ struct drm_mtk_gem_map_off req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ ret = drmIoctl(dev->fd, DRM_IOCTL_MTK_GEM_MAP_OFFSET, &req);
+ if (ret) {
+ fprintf(stderr, "failed to ioctl gem map offset[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ bo->vaddr = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, dev->fd, req.offset);
+ if (bo->vaddr == MAP_FAILED) {
+ fprintf(stderr, "failed to mmap buffer[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+ }
+
+ return bo->vaddr;
+}
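Taken together, the functions above compose as follows; a hedged sketch assuming the mediatek DRM kernel driver is loaded (the drmOpen(3) module name and the buffer size are illustrative):

#include <string.h>
#include <xf86drm.h>
#include "mediatek_drmif.h"

int main(void)
{
	struct mediatek_device *dev;
	struct mediatek_bo *bo;
	void *vaddr;
	int fd = drmOpen("mediatek", NULL);

	if (fd < 0)
		return 1;

	dev = mediatek_device_create(fd);
	if (!dev)
		goto err_close;

	/* 4 KiB buffer; flags = 0 requests the driver defaults. */
	bo = mediatek_bo_create(dev, 4096, 0);
	if (bo) {
		vaddr = mediatek_bo_map(bo);
		if (vaddr)
			memset(vaddr, 0, 4096);	/* CPU access via the mapping */
		mediatek_bo_destroy(bo);
	}

	mediatek_device_destroy(dev);
err_close:
	drmClose(fd);
	return 0;
}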
diff --git a/chromium/third_party/libdrm/src/mediatek/mediatek_drm.h b/chromium/third_party/libdrm/src/mediatek/mediatek_drm.h
new file mode 100644
index 00000000000..6d87d9c7982
--- /dev/null
+++ b/chromium/third_party/libdrm/src/mediatek/mediatek_drm.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef _UAPI_MEDIATEK_DRM_H
+#define _UAPI_MEDIATEK_DRM_H
+
+#include "drm.h"
+
+/**
+ * User-desired buffer creation information structure.
+ *
+ * @size: user-desired memory allocation size.
+ * - this size value would be page-aligned internally.
+ * @flags: user request for setting memory type or cache attributes.
+ * @handle: returned handle to the created gem object.
+ * - this handle will be set by gem module of kernel side.
+ */
+struct drm_mtk_gem_create {
+ uint64_t size;
+ uint32_t flags;
+ uint32_t handle;
+};
+
+/**
+ * A structure for getting buffer offset.
+ *
+ * @handle: a handle to the created gem object.
+ * @pad: just padding to be 64-bit aligned.
+ * @offset: relative offset of the allocated memory region.
+ *	- this value is filled in by the kernel.
+ */
+struct drm_mtk_gem_map_off {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+};
+
+#define DRM_MTK_GEM_CREATE 0x00
+#define DRM_MTK_GEM_MAP_OFFSET 0x01
+
+#define DRM_IOCTL_MTK_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_MTK_GEM_CREATE, struct drm_mtk_gem_create)
+
+#define DRM_IOCTL_MTK_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_MTK_GEM_MAP_OFFSET, struct drm_mtk_gem_map_off)
+
+
+#endif /* _UAPI_MEDIATEK_DRM_H */
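The two ioctls above compose in the same way the wrappers in mediatek_drm.c use them; a sketch of the raw path, with mtk_alloc_and_map as an illustrative helper name:

#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include "mediatek_drm.h"

/* Allocate a gem object of the given size and map it into user space. */
static void *mtk_alloc_and_map(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_mtk_gem_create create = { .size = size };
	struct drm_mtk_gem_map_off map = { 0 };
	void *ptr;

	if (drmIoctl(fd, DRM_IOCTL_MTK_GEM_CREATE, &create))
		return NULL;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MTK_GEM_MAP_OFFSET, &map))
		return NULL;

	/* The kernel-filled offset selects this object in mmap(2). */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
	           fd, map.offset);
	if (ptr == MAP_FAILED)
		return NULL;

	*handle = create.handle;
	return ptr;
}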
diff --git a/chromium/third_party/libdrm/src/mediatek/mediatek_drmif.h b/chromium/third_party/libdrm/src/mediatek/mediatek_drmif.h
new file mode 100644
index 00000000000..47cd2d810ab
--- /dev/null
+++ b/chromium/third_party/libdrm/src/mediatek/mediatek_drmif.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: JB TSAI <jb.tsai@mediatek.com>
+ *
+ * based on rockchip_drmif.h
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef MEDIATEK_DRMIF_H_
+#define MEDIATEK_DRMIF_H_
+
+#include <xf86drm.h>
+#include <stdint.h>
+#include "mediatek_drm.h"
+
+struct mediatek_device {
+ int fd;
+};
+
+/*
+ * Mediatek Buffer Object structure.
+ *
+ * @dev: the mediatek device object this buffer belongs to.
+ * @handle: a gem handle to the created gem object.
+ * @flags: indicate memory allocation and cache attribute types.
+ * @size: size of the created buffer.
+ * @vaddr: user space address of the mmapped gem buffer.
+ * @name: a gem global handle from a flink request.
+ */
+struct mediatek_bo {
+ struct mediatek_device *dev;
+ uint32_t handle;
+ uint32_t flags;
+ size_t size;
+ void *vaddr;
+ uint32_t name;
+};
+
+/*
+ * device related functions:
+ */
+struct mediatek_device *mediatek_device_create(int fd);
+void mediatek_device_destroy(struct mediatek_device *dev);
+
+/*
+ * buffer-object related functions:
+ */
+struct mediatek_bo *mediatek_bo_create(struct mediatek_device *dev,
+ size_t size, uint32_t flags);
+int mediatek_bo_get_info(struct mediatek_device *dev, uint32_t handle,
+ size_t *size, uint32_t *flags);
+void mediatek_bo_destroy(struct mediatek_bo *bo);
+struct mediatek_bo *mediatek_bo_from_name(struct mediatek_device *dev,
+ uint32_t name);
+int mediatek_bo_get_name(struct mediatek_bo *bo, uint32_t *name);
+uint32_t mediatek_bo_handle(struct mediatek_bo *bo);
+struct mediatek_bo *mediatek_bo_from_handle(struct mediatek_device *dev,
+ uint32_t handle, uint32_t flags, uint32_t size);
+void *mediatek_bo_map(struct mediatek_bo *bo);
+#endif /* MEDIATEK_DRMIF_H_ */
diff --git a/chromium/third_party/libdrm/src/nouveau/Android.mk b/chromium/third_party/libdrm/src/nouveau/Android.mk
new file mode 100644
index 00000000000..b67d30fc76e
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/Android.mk
@@ -0,0 +1,18 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Import variables LIBDRM_NOUVEAU_FILES, LIBDRM_NOUVEAU_H_FILES
+include $(LOCAL_PATH)/Makefile.sources
+
+LOCAL_MODULE := libdrm_nouveau
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES := $(patsubst %.h, , $(LIBDRM_NOUVEAU_FILES))
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+
+LOCAL_CFLAGS := \
+ -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/chromium/third_party/libdrm/src/nouveau/Makefile.am b/chromium/third_party/libdrm/src/nouveau/Makefile.am
new file mode 100644
index 00000000000..344a84454e4
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/Makefile.am
@@ -0,0 +1,33 @@
+include Makefile.sources
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ $(PTHREADSTUBS_CFLAGS) \
+ -I$(top_srcdir)/include/drm \
+ -DDEBUG
+
+libdrm_nouveau_la_LTLIBRARIES = libdrm_nouveau.la
+libdrm_nouveau_ladir = $(libdir)
+libdrm_nouveau_la_LDFLAGS = -version-number 2:0:0 -no-undefined
+libdrm_nouveau_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
+
+libdrm_nouveau_la_SOURCES = $(LIBDRM_NOUVEAU_FILES)
+
+libdrm_nouveauincludedir = ${includedir}/libdrm/nouveau
+libdrm_nouveauinclude_HEADERS = $(LIBDRM_NOUVEAU_H_FILES)
+
+libdrm_nouveaunvifincludedir = ${includedir}/libdrm/nouveau/nvif
+libdrm_nouveaunvifinclude_HEADERS = nvif/class.h \
+ nvif/cl0080.h \
+ nvif/cl9097.h \
+ nvif/if0002.h \
+ nvif/if0003.h \
+ nvif/ioctl.h \
+ nvif/unpack.h
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm_nouveau.pc
+
+TESTS = nouveau-symbol-check
+EXTRA_DIST = $(TESTS)
diff --git a/chromium/third_party/libdrm/src/nouveau/Makefile.sources b/chromium/third_party/libdrm/src/nouveau/Makefile.sources
new file mode 100644
index 00000000000..89f2a2b5cec
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/Makefile.sources
@@ -0,0 +1,9 @@
+LIBDRM_NOUVEAU_FILES := \
+ nouveau.c \
+ pushbuf.c \
+ bufctx.c \
+ abi16.c \
+ private.h
+
+LIBDRM_NOUVEAU_H_FILES := \
+ nouveau.h
diff --git a/chromium/third_party/libdrm/src/nouveau/abi16.c b/chromium/third_party/libdrm/src/nouveau/abi16.c
new file mode 100644
index 00000000000..ee38c0cb228
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/abi16.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <errno.h>
+
+#include "private.h"
+
+#include "nvif/class.h"
+
+static int
+abi16_chan_nv04(struct nouveau_object *obj)
+{
+ struct nouveau_drm *drm = nouveau_drm(obj);
+ struct nv04_fifo *nv04 = obj->data;
+ struct drm_nouveau_channel_alloc req = {
+ .fb_ctxdma_handle = nv04->vram,
+ .tt_ctxdma_handle = nv04->gart
+ };
+ int ret;
+
+ ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ nv04->base.channel = req.channel;
+ nv04->base.pushbuf = req.pushbuf_domains;
+ nv04->notify = req.notifier_handle;
+ nv04->base.object->handle = req.channel;
+ nv04->base.object->length = sizeof(*nv04);
+ return 0;
+}
+
+static int
+abi16_chan_nvc0(struct nouveau_object *obj)
+{
+ struct nouveau_drm *drm = nouveau_drm(obj);
+ struct drm_nouveau_channel_alloc req = {};
+ struct nvc0_fifo *nvc0 = obj->data;
+ int ret;
+
+ ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ nvc0->base.channel = req.channel;
+ nvc0->base.pushbuf = req.pushbuf_domains;
+ nvc0->notify = req.notifier_handle;
+ nvc0->base.object->handle = req.channel;
+ nvc0->base.object->length = sizeof(*nvc0);
+ return 0;
+}
+
+static int
+abi16_chan_nve0(struct nouveau_object *obj)
+{
+ struct nouveau_drm *drm = nouveau_drm(obj);
+ struct drm_nouveau_channel_alloc req = {};
+ struct nve0_fifo *nve0 = obj->data;
+ int ret;
+
+ if (obj->length > offsetof(struct nve0_fifo, engine)) {
+ req.fb_ctxdma_handle = 0xffffffff;
+ req.tt_ctxdma_handle = nve0->engine;
+ }
+
+ ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ nve0->base.channel = req.channel;
+ nve0->base.pushbuf = req.pushbuf_domains;
+ nve0->notify = req.notifier_handle;
+ nve0->base.object->handle = req.channel;
+ nve0->base.object->length = sizeof(*nve0);
+ return 0;
+}
+
+static int
+abi16_engobj(struct nouveau_object *obj)
+{
+ struct nouveau_drm *drm = nouveau_drm(obj);
+ struct drm_nouveau_grobj_alloc req = {
+ .channel = obj->parent->handle,
+ .handle = obj->handle,
+ .class = obj->oclass,
+ };
+ int ret;
+
+ /* Older kernel versions did not have the concept of nouveau-
+ * specific classes and abused some NVIDIA-assigned ones for
+ * a SW class. The ABI16 layer has compatibility in place to
+ * translate these older identifiers to the newer ones.
+ *
+ * Clients that have been updated to use NVIF are required to
+ * use the newer class identifiers, which means that they'll
+ * break if running on an older kernel.
+ *
+ * To handle this case, when using ABI16, we translate to the
+ * older values which work on any kernel.
+ */
+ switch (req.class) {
+ case NVIF_CLASS_SW_NV04 : req.class = 0x006e; break;
+ case NVIF_CLASS_SW_NV10 : req.class = 0x016e; break;
+ case NVIF_CLASS_SW_NV50 : req.class = 0x506e; break;
+ case NVIF_CLASS_SW_GF100: req.class = 0x906e; break;
+ default:
+ break;
+ }
+
+ ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GROBJ_ALLOC,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ obj->length = sizeof(struct nouveau_object *);
+ return 0;
+}
+
+static int
+abi16_ntfy(struct nouveau_object *obj)
+{
+ struct nouveau_drm *drm = nouveau_drm(obj);
+ struct nv04_notify *ntfy = obj->data;
+ struct drm_nouveau_notifierobj_alloc req = {
+ .channel = obj->parent->handle,
+ .handle = ntfy->object->handle,
+ .size = ntfy->length,
+ };
+ int ret;
+
+ ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NOTIFIEROBJ_ALLOC,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ ntfy->offset = req.offset;
+ ntfy->object->length = sizeof(*ntfy);
+ return 0;
+}
+
+drm_private int
+abi16_sclass(struct nouveau_object *obj, struct nouveau_sclass **psclass)
+{
+ struct nouveau_sclass *sclass;
+ struct nouveau_device *dev;
+
+ if (!(sclass = calloc(8, sizeof(*sclass))))
+ return -ENOMEM;
+ *psclass = sclass;
+
+ switch (obj->oclass) {
+ case NOUVEAU_FIFO_CHANNEL_CLASS:
+ /* Older kernel versions were exposing the wrong video engine
+ * classes on certain G98:GF100 boards. This has since been
+ * corrected, but ABI16 has compatibility in place to avoid
+ * breaking older userspace.
+ *
+ * Clients that have been updated to use NVIF are required to
+ * use the correct classes, which means that they'll break if
+ * running on an older kernel.
+ *
+ * To handle this issue, if using the older kernel interfaces,
+ * we'll magic up a list containing the vdec classes that the
+ * kernel will accept for these boards. Clients should make
+ * use of this information instead of hardcoding classes for
+ * specific chipsets.
+ */
+ dev = (struct nouveau_device *)obj->parent;
+ if (dev->chipset >= 0x98 &&
+ dev->chipset != 0xa0 &&
+ dev->chipset < 0xc0) {
+ *sclass++ = (struct nouveau_sclass){
+ GT212_MSVLD, -1, -1
+ };
+ *sclass++ = (struct nouveau_sclass){
+ GT212_MSPDEC, -1, -1
+ };
+ *sclass++ = (struct nouveau_sclass){
+ GT212_MSPPP, -1, -1
+ };
+ }
+ break;
+ default:
+ break;
+ }
+
+ return sclass - *psclass;
+}
+
+drm_private void
+abi16_delete(struct nouveau_object *obj)
+{
+ struct nouveau_drm *drm = nouveau_drm(obj);
+ if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
+ struct drm_nouveau_channel_free req;
+ req.channel = obj->handle;
+ drmCommandWrite(drm->fd, DRM_NOUVEAU_CHANNEL_FREE,
+ &req, sizeof(req));
+ } else {
+ struct drm_nouveau_gpuobj_free req;
+ req.channel = obj->parent->handle;
+ req.handle = obj->handle;
+ drmCommandWrite(drm->fd, DRM_NOUVEAU_GPUOBJ_FREE,
+ &req, sizeof(req));
+ }
+}
+
+drm_private bool
+abi16_object(struct nouveau_object *obj, int (**func)(struct nouveau_object *))
+{
+ struct nouveau_object *parent = obj->parent;
+
+ /* nouveau_object::length is (ab)used to determine whether the
+ * object is a legacy object (!=0), or a real NVIF object.
+ */
+ if ((parent->length != 0 && parent->oclass == NOUVEAU_DEVICE_CLASS) ||
+ (parent->length == 0 && parent->oclass == NV_DEVICE)) {
+ if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
+ struct nouveau_device *dev = (void *)parent;
+ if (dev->chipset < 0xc0)
+ *func = abi16_chan_nv04;
+ else
+ if (dev->chipset < 0xe0)
+ *func = abi16_chan_nvc0;
+ else
+ *func = abi16_chan_nve0;
+ return true;
+ }
+ } else
+ if ((parent->length != 0 &&
+ parent->oclass == NOUVEAU_FIFO_CHANNEL_CLASS)) {
+ if (obj->oclass == NOUVEAU_NOTIFIER_CLASS) {
+ *func = abi16_ntfy;
+ return true;
+ }
+
+ *func = abi16_engobj;
+ return false; /* try NVIF, if supported, before calling func */
+ }
+
+ *func = NULL;
+ return false;
+}
+
+drm_private void
+abi16_bo_info(struct nouveau_bo *bo, struct drm_nouveau_gem_info *info)
+{
+ struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
+
+ nvbo->map_handle = info->map_handle;
+ bo->handle = info->handle;
+ bo->size = info->size;
+ bo->offset = info->offset;
+
+ bo->flags = 0;
+ if (info->domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ bo->flags |= NOUVEAU_BO_VRAM;
+ if (info->domain & NOUVEAU_GEM_DOMAIN_GART)
+ bo->flags |= NOUVEAU_BO_GART;
+ if (!(info->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG))
+ bo->flags |= NOUVEAU_BO_CONTIG;
+ if (nvbo->map_handle)
+ bo->flags |= NOUVEAU_BO_MAP;
+
+ if (bo->device->chipset >= 0xc0) {
+ bo->config.nvc0.memtype = (info->tile_flags & 0xff00) >> 8;
+ bo->config.nvc0.tile_mode = info->tile_mode;
+ } else
+ if (bo->device->chipset >= 0x80 || bo->device->chipset == 0x50) {
+ bo->config.nv50.memtype = (info->tile_flags & 0x07f00) >> 8 |
+ (info->tile_flags & 0x30000) >> 9;
+ bo->config.nv50.tile_mode = info->tile_mode << 4;
+ } else {
+ bo->config.nv04.surf_flags = info->tile_flags & 7;
+ bo->config.nv04.surf_pitch = info->tile_mode;
+ }
+}
+
+drm_private int
+abi16_bo_init(struct nouveau_bo *bo, uint32_t alignment,
+ union nouveau_bo_config *config)
+{
+ struct nouveau_device *dev = bo->device;
+ struct nouveau_drm *drm = nouveau_drm(&dev->object);
+ struct drm_nouveau_gem_new req = {};
+ struct drm_nouveau_gem_info *info = &req.info;
+ int ret;
+
+ if (bo->flags & NOUVEAU_BO_VRAM)
+ info->domain |= NOUVEAU_GEM_DOMAIN_VRAM;
+ if (bo->flags & NOUVEAU_BO_GART)
+ info->domain |= NOUVEAU_GEM_DOMAIN_GART;
+ if (!info->domain)
+ info->domain |= NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+
+ if (bo->flags & NOUVEAU_BO_MAP)
+ info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;
+
+ if (bo->flags & NOUVEAU_BO_COHERENT)
+ info->domain |= NOUVEAU_GEM_DOMAIN_COHERENT;
+
+ if (!(bo->flags & NOUVEAU_BO_CONTIG))
+ info->tile_flags = NOUVEAU_GEM_TILE_NONCONTIG;
+
+ info->size = bo->size;
+ req.align = alignment;
+
+ if (config) {
+ if (dev->chipset >= 0xc0) {
+ info->tile_flags = (config->nvc0.memtype & 0xff) << 8;
+ info->tile_mode = config->nvc0.tile_mode;
+ } else
+ if (dev->chipset >= 0x80 || dev->chipset == 0x50) {
+ info->tile_flags = (config->nv50.memtype & 0x07f) << 8 |
+ (config->nv50.memtype & 0x180) << 9;
+ info->tile_mode = config->nv50.tile_mode >> 4;
+ } else {
+ info->tile_flags = config->nv04.surf_flags & 7;
+ info->tile_mode = config->nv04.surf_pitch;
+ }
+ }
+
+ if (!nouveau_device(dev)->have_bo_usage)
+ info->tile_flags &= 0x0000ff00;
+
+ ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_NEW,
+ &req, sizeof(req));
+ if (ret == 0)
+ abi16_bo_info(bo, &req.info);
+ return ret;
+}
diff --git a/chromium/third_party/libdrm/src/nouveau/bufctx.c b/chromium/third_party/libdrm/src/nouveau/bufctx.c
new file mode 100644
index 00000000000..4f76e5dff35
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/bufctx.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <errno.h>
+
+#include "libdrm_lists.h"
+
+#include "nouveau.h"
+#include "private.h"
+
+struct nouveau_bufref_priv {
+ struct nouveau_bufref base;
+ struct nouveau_bufref_priv *next;
+ struct nouveau_bufctx *bufctx;
+};
+
+struct nouveau_bufbin_priv {
+ struct nouveau_bufref_priv *list;
+ int relocs;
+};
+
+struct nouveau_bufctx_priv {
+ struct nouveau_bufctx base;
+ struct nouveau_bufref_priv *free;
+ int nr_bins;
+ struct nouveau_bufbin_priv bins[];
+};
+
+static inline struct nouveau_bufctx_priv *
+nouveau_bufctx(struct nouveau_bufctx *bctx)
+{
+ return (struct nouveau_bufctx_priv *)bctx;
+}
+
+int
+nouveau_bufctx_new(struct nouveau_client *client, int bins,
+ struct nouveau_bufctx **pbctx)
+{
+ struct nouveau_bufctx_priv *priv;
+
+ priv = calloc(1, sizeof(*priv) + sizeof(priv->bins[0]) * bins);
+ if (priv) {
+ DRMINITLISTHEAD(&priv->base.head);
+ DRMINITLISTHEAD(&priv->base.pending);
+ DRMINITLISTHEAD(&priv->base.current);
+ priv->base.client = client;
+ priv->nr_bins = bins;
+ *pbctx = &priv->base;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+void
+nouveau_bufctx_del(struct nouveau_bufctx **pbctx)
+{
+ struct nouveau_bufctx_priv *pctx = nouveau_bufctx(*pbctx);
+ struct nouveau_bufref_priv *pref;
+ if (pctx) {
+ while (pctx->nr_bins--)
+ nouveau_bufctx_reset(&pctx->base, pctx->nr_bins);
+ while ((pref = pctx->free)) {
+ pctx->free = pref->next;
+ free(pref);
+ }
+ free(pctx);
+ *pbctx = NULL;
+ }
+}
+
+void
+nouveau_bufctx_reset(struct nouveau_bufctx *bctx, int bin)
+{
+ struct nouveau_bufctx_priv *pctx = nouveau_bufctx(bctx);
+ struct nouveau_bufbin_priv *pbin = &pctx->bins[bin];
+ struct nouveau_bufref_priv *pref;
+
+ while ((pref = pbin->list)) {
+ DRMLISTDELINIT(&pref->base.thead);
+ pbin->list = pref->next;
+ pref->next = pctx->free;
+ pctx->free = pref;
+ }
+
+ bctx->relocs -= pbin->relocs;
+ pbin->relocs = 0;
+}
+
+struct nouveau_bufref *
+nouveau_bufctx_refn(struct nouveau_bufctx *bctx, int bin,
+ struct nouveau_bo *bo, uint32_t flags)
+{
+ struct nouveau_bufctx_priv *pctx = nouveau_bufctx(bctx);
+ struct nouveau_bufbin_priv *pbin = &pctx->bins[bin];
+ struct nouveau_bufref_priv *pref = pctx->free;
+
+ if (!pref)
+ pref = malloc(sizeof(*pref));
+ else
+ pctx->free = pref->next;
+
+ if (pref) {
+ pref->base.bo = bo;
+ pref->base.flags = flags;
+ pref->base.packet = 0;
+
+ DRMLISTADDTAIL(&pref->base.thead, &bctx->pending);
+ pref->bufctx = bctx;
+ pref->next = pbin->list;
+ pbin->list = pref;
+ }
+
+ return &pref->base;
+}
+
+struct nouveau_bufref *
+nouveau_bufctx_mthd(struct nouveau_bufctx *bctx, int bin, uint32_t packet,
+ struct nouveau_bo *bo, uint64_t data, uint32_t flags,
+ uint32_t vor, uint32_t tor)
+{
+ struct nouveau_bufctx_priv *pctx = nouveau_bufctx(bctx);
+ struct nouveau_bufbin_priv *pbin = &pctx->bins[bin];
+ struct nouveau_bufref *bref = nouveau_bufctx_refn(bctx, bin, bo, flags);
+ if (bref) {
+ bref->packet = packet;
+ bref->data = data;
+ bref->vor = vor;
+ bref->tor = tor;
+ pbin->relocs++;
+ bctx->relocs++;
+ }
+ return bref;
+}
diff --git a/chromium/third_party/libdrm/src/nouveau/libdrm_nouveau.pc.in b/chromium/third_party/libdrm/src/nouveau/libdrm_nouveau.pc.in
new file mode 100644
index 00000000000..7d0622e9a71
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/libdrm_nouveau.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_nouveau
+Description: Userspace interface to nouveau kernel DRM services
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -ldrm_nouveau
+Cflags: -I${includedir} -I${includedir}/libdrm -I${includedir}/libdrm/nouveau
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/nouveau/nouveau-symbol-check b/chromium/third_party/libdrm/src/nouveau/nouveau-symbol-check
new file mode 100755
index 00000000000..b265cea466a
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nouveau-symbol-check
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# The following symbols (past the first five) are taken from the public headers.
+# A list of the latter should be available in Makefile.sources/LIBDRM_NOUVEAU_H_FILES
+
+FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_nouveau.so} | awk '{print $3}'| while read func; do
+( grep -q "^$func$" || echo $func ) <<EOF
+__bss_start
+_edata
+_end
+_fini
+_init
+nouveau_bo_map
+nouveau_bo_name_get
+nouveau_bo_name_ref
+nouveau_bo_new
+nouveau_bo_prime_handle_ref
+nouveau_bo_ref
+nouveau_bo_set_prime
+nouveau_bo_wait
+nouveau_bo_wrap
+nouveau_bufctx_del
+nouveau_bufctx_mthd
+nouveau_bufctx_new
+nouveau_bufctx_refn
+nouveau_bufctx_reset
+nouveau_client_del
+nouveau_client_new
+nouveau_device_del
+nouveau_device_new
+nouveau_device_open
+nouveau_device_open_existing
+nouveau_device_wrap
+nouveau_drm_del
+nouveau_drm_new
+nouveau_getparam
+nouveau_object_del
+nouveau_object_mclass
+nouveau_object_mthd
+nouveau_object_new
+nouveau_object_sclass_get
+nouveau_object_sclass_put
+nouveau_pushbuf_bufctx
+nouveau_pushbuf_data
+nouveau_pushbuf_del
+nouveau_pushbuf_kick
+nouveau_pushbuf_new
+nouveau_pushbuf_refd
+nouveau_pushbuf_refn
+nouveau_pushbuf_reloc
+nouveau_pushbuf_space
+nouveau_pushbuf_validate
+nouveau_setparam
+EOF
+done)
+
+test ! -n "$FUNCS" || echo $FUNCS
+test ! -n "$FUNCS"
diff --git a/chromium/third_party/libdrm/src/nouveau/nouveau.c b/chromium/third_party/libdrm/src/nouveau/nouveau.c
new file mode 100644
index 00000000000..e113a8fe780
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nouveau.c
@@ -0,0 +1,878 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <strings.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <xf86drm.h>
+#include <xf86atomic.h>
+#include "libdrm_macros.h"
+#include "libdrm_lists.h"
+#include "nouveau_drm.h"
+
+#include "nouveau.h"
+#include "private.h"
+
+#include "nvif/class.h"
+#include "nvif/cl0080.h"
+#include "nvif/ioctl.h"
+#include "nvif/unpack.h"
+
+#ifdef DEBUG
+drm_private uint32_t nouveau_debug = 0;
+
+static void
+debug_init(char *args)
+{
+ if (args) {
+ int n = strtol(args, NULL, 0);
+ if (n >= 0)
+ nouveau_debug = n;
+ }
+}
+#endif
+
+static int
+nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
+{
+ struct nouveau_drm *drm = nouveau_drm(obj);
+ union {
+ struct nvif_ioctl_v0 v0;
+ } *args = data;
+ uint32_t argc = size;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
+ if (!obj->length) {
+ if (obj != &drm->client)
+ args->v0.object = (unsigned long)(void *)obj;
+ else
+ args->v0.object = 0;
+ args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
+ args->v0.route = 0x00;
+ } else {
+ args->v0.route = 0xff;
+ args->v0.token = obj->handle;
+ }
+ } else
+ return ret;
+
+ return drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NVIF, args, argc);
+}
+
+int
+nouveau_object_mthd(struct nouveau_object *obj,
+ uint32_t mthd, void *data, uint32_t size)
+{
+ struct nouveau_drm *drm = nouveau_drm(obj);
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_mthd_v0 mthd;
+ } *args;
+ uint32_t argc = sizeof(*args) + size;
+ uint8_t stack[128];
+ int ret;
+
+ if (!drm->nvif)
+ return -ENOSYS;
+
+ if (argc > sizeof(stack)) {
+ if (!(args = malloc(argc)))
+ return -ENOMEM;
+ } else {
+ args = (void *)stack;
+ }
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_MTHD;
+ args->mthd.version = 0;
+ args->mthd.method = mthd;
+
+ memcpy(args->mthd.data, data, size);
+ ret = nouveau_object_ioctl(obj, args, argc);
+ memcpy(data, args->mthd.data, size);
+ if (args != (void *)stack)
+ free(args);
+ return ret;
+}
+
+void
+nouveau_object_sclass_put(struct nouveau_sclass **psclass)
+{
+ free(*psclass);
+ *psclass = NULL;
+}
+
+int
+nouveau_object_sclass_get(struct nouveau_object *obj,
+ struct nouveau_sclass **psclass)
+{
+ struct nouveau_drm *drm = nouveau_drm(obj);
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_sclass_v0 sclass;
+ } *args = NULL;
+ struct nouveau_sclass *sclass;
+ int ret, cnt = 0, i;
+ uint32_t size;
+
+ if (!drm->nvif)
+ return abi16_sclass(obj, psclass);
+
+ while (1) {
+ size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
+ if (!(args = malloc(size)))
+ return -ENOMEM;
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
+ args->sclass.version = 0;
+ args->sclass.count = cnt;
+
+ ret = nouveau_object_ioctl(obj, args, size);
+ if (ret == 0 && args->sclass.count <= cnt)
+ break;
+ cnt = args->sclass.count;
+ free(args);
+ if (ret != 0)
+ return ret;
+ }
+
+ if ((sclass = calloc(args->sclass.count, sizeof(*sclass)))) {
+ for (i = 0; i < args->sclass.count; i++) {
+ sclass[i].oclass = args->sclass.oclass[i].oclass;
+ sclass[i].minver = args->sclass.oclass[i].minver;
+ sclass[i].maxver = args->sclass.oclass[i].maxver;
+ }
+ *psclass = sclass;
+ ret = args->sclass.count;
+ } else {
+ ret = -ENOMEM;
+ }
+
+ free(args);
+ return ret;
+}
+
+int
+nouveau_object_mclass(struct nouveau_object *obj,
+ const struct nouveau_mclass *mclass)
+{
+ struct nouveau_sclass *sclass;
+ int ret = -ENODEV;
+ int cnt, i, j;
+
+ cnt = nouveau_object_sclass_get(obj, &sclass);
+ if (cnt < 0)
+ return cnt;
+
+ for (i = 0; ret < 0 && mclass[i].oclass; i++) {
+ for (j = 0; j < cnt; j++) {
+ if (mclass[i].oclass == sclass[j].oclass &&
+ mclass[i].version >= sclass[j].minver &&
+ mclass[i].version <= sclass[j].maxver) {
+ ret = i;
+ break;
+ }
+ }
+ }
+
+ nouveau_object_sclass_put(&sclass);
+ return ret;
+}
+
+static void
+nouveau_object_fini(struct nouveau_object *obj)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_del del;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_DEL,
+ };
+
+ if (obj->data) {
+ abi16_delete(obj);
+ free(obj->data);
+ obj->data = NULL;
+ return;
+ }
+
+ nouveau_object_ioctl(obj, &args, sizeof(args));
+}
+
+static int
+nouveau_object_init(struct nouveau_object *parent, uint32_t handle,
+ int32_t oclass, void *data, uint32_t size,
+ struct nouveau_object *obj)
+{
+ struct nouveau_drm *drm = nouveau_drm(parent);
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_new_v0 new;
+ } *args;
+ uint32_t argc = sizeof(*args) + size;
+ int (*func)(struct nouveau_object *);
+ int ret = -ENOSYS;
+
+ obj->parent = parent;
+ obj->handle = handle;
+ obj->oclass = oclass;
+ obj->length = 0;
+ obj->data = NULL;
+
+ if (!abi16_object(obj, &func) && drm->nvif) {
+ if (!(args = malloc(argc)))
+ return -ENOMEM;
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_NEW;
+ args->new.version = 0;
+ args->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
+ args->new.token = (unsigned long)(void *)obj;
+ args->new.object = (unsigned long)(void *)obj;
+ args->new.handle = handle;
+ args->new.oclass = oclass;
+ memcpy(args->new.data, data, size);
+ ret = nouveau_object_ioctl(parent, args, argc);
+ memcpy(data, args->new.data, size);
+ free(args);
+ } else
+ if (func) {
+ obj->length = size ? size : sizeof(struct nouveau_object *);
+ if (!(obj->data = malloc(obj->length)))
+ return -ENOMEM;
+ if (data)
+ memcpy(obj->data, data, obj->length);
+ *(struct nouveau_object **)obj->data = obj;
+
+ ret = func(obj);
+ }
+
+ if (ret) {
+ nouveau_object_fini(obj);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
+ uint32_t oclass, void *data, uint32_t length,
+ struct nouveau_object **pobj)
+{
+ struct nouveau_object *obj;
+ int ret;
+
+ if (!(obj = malloc(sizeof(*obj))))
+ return -ENOMEM;
+
+ ret = nouveau_object_init(parent, handle, oclass, data, length, obj);
+ if (ret) {
+ free(obj);
+ return ret;
+ }
+
+ *pobj = obj;
+ return 0;
+}
+
+void
+nouveau_object_del(struct nouveau_object **pobj)
+{
+ struct nouveau_object *obj = *pobj;
+ if (obj) {
+ nouveau_object_fini(obj);
+ free(obj);
+ *pobj = NULL;
+ }
+}
+
+void
+nouveau_drm_del(struct nouveau_drm **pdrm)
+{
+ free(*pdrm);
+ *pdrm = NULL;
+}
+
+int
+nouveau_drm_new(int fd, struct nouveau_drm **pdrm)
+{
+ struct nouveau_drm *drm;
+ drmVersionPtr ver;
+
+#ifdef DEBUG
+ debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));
+#endif
+
+ if (!(drm = calloc(1, sizeof(*drm))))
+ return -ENOMEM;
+ drm->fd = fd;
+
+ if (!(ver = drmGetVersion(fd))) {
+ nouveau_drm_del(&drm);
+ return -EINVAL;
+ }
+ *pdrm = drm;
+
+ drm->version = (ver->version_major << 24) |
+ (ver->version_minor << 8) |
+ ver->version_patchlevel;
+ drm->nvif = (drm->version >= 0x01000301);
+ drmFreeVersion(ver);
+ return 0;
+}
+
+/* this is the old libdrm's version of nouveau_device_wrap(), the symbol
+ * is kept here to prevent AIGLX from crashing if the DDX is linked against
+ * the new libdrm, but the DRI driver against the old one.
+ */
+int
+nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
+ drm_context_t ctx)
+{
+ return -EACCES;
+}
+
+int
+nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
+ void *data, uint32_t size, struct nouveau_device **pdev)
+{
+ struct nv_device_info_v0 info = {};
+ union {
+ struct nv_device_v0 v0;
+ } *args = data;
+ uint32_t argc = size;
+ struct nouveau_drm *drm = nouveau_drm(parent);
+ struct nouveau_device_priv *nvdev;
+ struct nouveau_device *dev;
+ uint64_t v;
+ char *tmp;
+ int ret = -ENOSYS;
+
+ if (oclass != NV_DEVICE ||
+ nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))
+ return ret;
+
+ if (!(nvdev = calloc(1, sizeof(*nvdev))))
+ return -ENOMEM;
+ dev = *pdev = &nvdev->base;
+ dev->fd = -1;
+
+ if (drm->nvif) {
+ ret = nouveau_object_init(parent, 0, oclass, args, argc,
+ &dev->object);
+ if (ret)
+ goto done;
+
+ info.version = 0;
+
+ ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
+ &info, sizeof(info));
+ if (ret)
+ goto done;
+
+ nvdev->base.chipset = info.chipset;
+ nvdev->have_bo_usage = true;
+ } else
+ if (args->v0.device == ~0ULL) {
+ nvdev->base.object.parent = &drm->client;
+ nvdev->base.object.handle = ~0ULL;
+ nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
+ nvdev->base.object.length = ~0;
+
+ ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v);
+ if (ret)
+ goto done;
+ nvdev->base.chipset = v;
+
+ ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &v);
+ if (ret == 0)
+ nvdev->have_bo_usage = (v != 0);
+ } else
+ return -ENOSYS;
+
+ ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &v);
+ if (ret)
+ goto done;
+ nvdev->base.vram_size = v;
+
+ ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &v);
+ if (ret)
+ goto done;
+ nvdev->base.gart_size = v;
+
+ tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
+ if (tmp)
+ nvdev->vram_limit_percent = atoi(tmp);
+ else
+ nvdev->vram_limit_percent = 80;
+
+ nvdev->base.vram_limit =
+ (nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;
+
+ tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
+ if (tmp)
+ nvdev->gart_limit_percent = atoi(tmp);
+ else
+ nvdev->gart_limit_percent = 80;
+
+ nvdev->base.gart_limit =
+ (nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;
+
+ ret = pthread_mutex_init(&nvdev->lock, NULL);
+ DRMINITLISTHEAD(&nvdev->bo_list);
+done:
+ if (ret)
+ nouveau_device_del(pdev);
+ return ret;
+}
+
+int
+nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
+{
+ struct nouveau_drm *drm;
+ struct nouveau_device_priv *nvdev;
+ int ret;
+
+ ret = nouveau_drm_new(fd, &drm);
+ if (ret)
+ return ret;
+ drm->nvif = false;
+
+ ret = nouveau_device_new(&drm->client, NV_DEVICE,
+ &(struct nv_device_v0) {
+ .device = ~0ULL,
+ }, sizeof(struct nv_device_v0), pdev);
+ if (ret) {
+ nouveau_drm_del(&drm);
+ return ret;
+ }
+
+ nvdev = nouveau_device(*pdev);
+ nvdev->base.fd = drm->fd;
+ nvdev->base.drm_version = drm->version;
+ nvdev->close = close;
+ return 0;
+}
+
+int
+nouveau_device_open(const char *busid, struct nouveau_device **pdev)
+{
+ int ret = -ENODEV, fd = drmOpen("nouveau", busid);
+ if (fd >= 0) {
+ ret = nouveau_device_wrap(fd, 1, pdev);
+ if (ret)
+ drmClose(fd);
+ }
+ return ret;
+}
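+
+/* A minimal open/close sequence (an illustrative sketch, not upstream
+ * code): nouveau_device_del() also closes the fd here, because
+ * nouveau_device_open() wraps it with close == 1.
+ *
+ *	struct nouveau_device *dev;
+ *	struct nouveau_client *cli;
+ *
+ *	if (nouveau_device_open(NULL, &dev) == 0) {
+ *		if (nouveau_client_new(dev, &cli) == 0)
+ *			nouveau_client_del(&cli);
+ *		nouveau_device_del(&dev);
+ *	}
+ */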
+
+void
+nouveau_device_del(struct nouveau_device **pdev)
+{
+ struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
+ if (nvdev) {
+ free(nvdev->client);
+ pthread_mutex_destroy(&nvdev->lock);
+ if (nvdev->base.fd >= 0) {
+ struct nouveau_drm *drm =
+ nouveau_drm(&nvdev->base.object);
+ nouveau_drm_del(&drm);
+ if (nvdev->close)
+ drmClose(nvdev->base.fd);
+ }
+ free(nvdev);
+ *pdev = NULL;
+ }
+}
+
+int
+nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
+{
+ struct nouveau_drm *drm = nouveau_drm(&dev->object);
+ struct drm_nouveau_getparam r = { .param = param };
+ int fd = drm->fd, ret =
+ drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
+ *value = r.value;
+ return ret;
+}
+
+int
+nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
+{
+ struct nouveau_drm *drm = nouveau_drm(&dev->object);
+ struct drm_nouveau_setparam r = { .param = param, .value = value };
+ return drmCommandWrite(drm->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
+}
+
+int
+nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
+{
+ struct nouveau_device_priv *nvdev = nouveau_device(dev);
+ struct nouveau_client_priv *pcli;
+ int id = 0, i, ret = -ENOMEM;
+ uint32_t *clients;
+
+ pthread_mutex_lock(&nvdev->lock);
+
+ for (i = 0; i < nvdev->nr_client; i++) {
+ id = ffs(nvdev->client[i]) - 1;
+ if (id >= 0)
+ goto out;
+ }
+
+ clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
+ if (!clients)
+ goto unlock;
+ nvdev->client = clients;
+ nvdev->client[i] = 0;
+ nvdev->nr_client++;
+
+out:
+ pcli = calloc(1, sizeof(*pcli));
+ if (pcli) {
+ nvdev->client[i] |= (1 << id);
+ pcli->base.device = dev;
+ pcli->base.id = (i * 32) + id;
+ ret = 0;
+ }
+
+ *pclient = &pcli->base;
+
+unlock:
+ pthread_mutex_unlock(&nvdev->lock);
+ return ret;
+}
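+
+/* A note on the id encoding above (illustrative, not upstream text):
+ * client ids pack into an array of 32-bit words, one bit per id, so
+ *
+ *	id = (word index * 32) + bit, e.g. id 35 -> client[1], bit 3
+ *
+ * and nouveau_client_del() below recovers the slot with id / 32 and
+ * id % 32.
+ */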
+
+void
+nouveau_client_del(struct nouveau_client **pclient)
+{
+ struct nouveau_client_priv *pcli = nouveau_client(*pclient);
+ struct nouveau_device_priv *nvdev;
+ if (pcli) {
+ int id = pcli->base.id;
+ nvdev = nouveau_device(pcli->base.device);
+ pthread_mutex_lock(&nvdev->lock);
+ nvdev->client[id / 32] &= ~(1 << (id % 32));
+ pthread_mutex_unlock(&nvdev->lock);
+ free(pcli->kref);
+ free(pcli);
+ }
+}
+
+static void
+nouveau_bo_del(struct nouveau_bo *bo)
+{
+ struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
+ struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
+ struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
+ struct drm_gem_close req = { .handle = bo->handle };
+
+ if (nvbo->head.next) {
+ pthread_mutex_lock(&nvdev->lock);
+ if (atomic_read(&nvbo->refcnt) == 0) {
+ DRMLISTDEL(&nvbo->head);
+ /*
+ * This bo has to be closed with the lock held because
+ * gem handles are not refcounted. If a shared bo is
+			 * closed and re-opened in another thread, a race
+ * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
+ * might cause the bo to be closed accidentally while
+ * re-importing.
+ */
+ drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ }
+ pthread_mutex_unlock(&nvdev->lock);
+ } else {
+ drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ }
+ if (bo->map)
+ drm_munmap(bo->map, bo->size);
+ free(nvbo);
+}
+
+int
+nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
+ uint64_t size, union nouveau_bo_config *config,
+ struct nouveau_bo **pbo)
+{
+ struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
+ struct nouveau_bo *bo = &nvbo->base;
+ int ret;
+
+ if (!nvbo)
+ return -ENOMEM;
+ atomic_set(&nvbo->refcnt, 1);
+ bo->device = dev;
+ bo->flags = flags;
+ bo->size = size;
+
+ ret = abi16_bo_init(bo, align, config);
+ if (ret) {
+ free(nvbo);
+ return ret;
+ }
+
+ *pbo = bo;
+ return 0;
+}
+
+static int
+nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
+ struct nouveau_bo **pbo, int name)
+{
+ struct nouveau_drm *drm = nouveau_drm(&dev->object);
+ struct nouveau_device_priv *nvdev = nouveau_device(dev);
+ struct drm_nouveau_gem_info req = { .handle = handle };
+ struct nouveau_bo_priv *nvbo;
+ int ret;
+
+ DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
+ if (nvbo->base.handle == handle) {
+ if (atomic_inc_return(&nvbo->refcnt) == 1) {
+				/*
+				 * This bo is dead: another thread is about
+				 * to free it. But since refcnt is now
+				 * non-zero again, that thread will skip
+				 * the ioctl that closes the GEM handle.
+				 *
+				 * Remove this bo from the list so other
+				 * calls to nouveau_bo_wrap_locked() will
+				 * see our replacement nvbo.
+				 */
+ DRMLISTDEL(&nvbo->head);
+ if (!name)
+ name = nvbo->name;
+ break;
+ }
+
+ *pbo = &nvbo->base;
+ return 0;
+ }
+ }
+
+ ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_INFO,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ nvbo = calloc(1, sizeof(*nvbo));
+ if (nvbo) {
+ atomic_set(&nvbo->refcnt, 1);
+ nvbo->base.device = dev;
+ abi16_bo_info(&nvbo->base, &req);
+ nvbo->name = name;
+ DRMLISTADD(&nvbo->head, &nvdev->bo_list);
+ *pbo = &nvbo->base;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+static void
+nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
+{
+ if (!nvbo->head.next) {
+ struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
+ pthread_mutex_lock(&nvdev->lock);
+ if (!nvbo->head.next)
+ DRMLISTADD(&nvbo->head, &nvdev->bo_list);
+ pthread_mutex_unlock(&nvdev->lock);
+ }
+}
+
+int
+nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
+ struct nouveau_bo **pbo)
+{
+ struct nouveau_device_priv *nvdev = nouveau_device(dev);
+ int ret;
+ pthread_mutex_lock(&nvdev->lock);
+ ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
+ pthread_mutex_unlock(&nvdev->lock);
+ return ret;
+}
+
+int
+nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
+ struct nouveau_bo **pbo)
+{
+ struct nouveau_drm *drm = nouveau_drm(&dev->object);
+ struct nouveau_device_priv *nvdev = nouveau_device(dev);
+ struct nouveau_bo_priv *nvbo;
+ struct drm_gem_open req = { .name = name };
+ int ret;
+
+ pthread_mutex_lock(&nvdev->lock);
+ DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
+ if (nvbo->name == name) {
+ ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
+ pbo, name);
+ pthread_mutex_unlock(&nvdev->lock);
+ return ret;
+ }
+ }
+
+ ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &req);
+ if (ret == 0) {
+ ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);
+ }
+
+ pthread_mutex_unlock(&nvdev->lock);
+ return ret;
+}
+
+int
+nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
+{
+ struct drm_gem_flink req = { .handle = bo->handle };
+ struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
+ struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
+
+ *name = nvbo->name;
+ if (!*name) {
+ int ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_FLINK, &req);
+
+ if (ret) {
+ *name = 0;
+ return ret;
+ }
+ nvbo->name = *name = req.name;
+
+ nouveau_bo_make_global(nvbo);
+ }
+ return 0;
+}
+
+void
+nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
+{
+ struct nouveau_bo *ref = *pref;
+ if (bo) {
+ atomic_inc(&nouveau_bo(bo)->refcnt);
+ }
+ if (ref) {
+ if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
+ nouveau_bo_del(ref);
+ }
+ *pref = bo;
+}
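+
+/* Usage note (illustrative, not upstream text): nouveau_bo_ref() is both
+ * ref and unref, keyed on its first argument:
+ *
+ *	struct nouveau_bo *ref = NULL;
+ *	nouveau_bo_ref(bo, &ref);	take a reference
+ *	nouveau_bo_ref(NULL, &ref);	drop it; frees bo on the last unref
+ */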
+
+int
+nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
+ struct nouveau_bo **bo)
+{
+ struct nouveau_drm *drm = nouveau_drm(&dev->object);
+ struct nouveau_device_priv *nvdev = nouveau_device(dev);
+ int ret;
+ unsigned int handle;
+
+ nouveau_bo_ref(NULL, bo);
+
+ pthread_mutex_lock(&nvdev->lock);
+ ret = drmPrimeFDToHandle(drm->fd, prime_fd, &handle);
+ if (ret == 0) {
+ ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
+ }
+ pthread_mutex_unlock(&nvdev->lock);
+ return ret;
+}
+
+int
+nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
+{
+ struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
+ struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
+ int ret;
+
+ ret = drmPrimeHandleToFD(drm->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);
+ if (ret)
+ return ret;
+
+ nouveau_bo_make_global(nvbo);
+ return 0;
+}
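+
+/* A dma-buf sharing sketch built from the two calls above (illustrative,
+ * not upstream code; `bo' and `other_dev' are assumed to exist, and the
+ * fd may be closed once imported since the new handle keeps the object
+ * alive):
+ *
+ *	int fd;
+ *	struct nouveau_bo *imported = NULL;
+ *
+ *	if (nouveau_bo_set_prime(bo, &fd) == 0) {
+ *		nouveau_bo_prime_handle_ref(other_dev, fd, &imported);
+ *		close(fd);
+ *	}
+ */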
+
+int
+nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
+ struct nouveau_client *client)
+{
+ struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
+ struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
+ struct drm_nouveau_gem_cpu_prep req;
+ struct nouveau_pushbuf *push;
+ int ret = 0;
+
+ if (!(access & NOUVEAU_BO_RDWR))
+ return 0;
+
+ push = cli_push_get(client, bo);
+ if (push && push->channel)
+ nouveau_pushbuf_kick(push, push->channel);
+
+ if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
+ !(access & NOUVEAU_BO_WR))
+ return 0;
+
+ req.handle = bo->handle;
+ req.flags = 0;
+ if (access & NOUVEAU_BO_WR)
+ req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
+ if (access & NOUVEAU_BO_NOBLOCK)
+ req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;
+
+ ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GEM_CPU_PREP,
+ &req, sizeof(req));
+ if (ret == 0)
+ nvbo->access = 0;
+ return ret;
+}
+
+int
+nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
+ struct nouveau_client *client)
+{
+ struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
+ struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
+ if (bo->map == NULL) {
+ bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, drm->fd, nvbo->map_handle);
+ if (bo->map == MAP_FAILED) {
+ bo->map = NULL;
+ return -errno;
+ }
+ }
+ return nouveau_bo_wait(bo, access, client);
+}
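+
+/* A CPU-access sketch (illustrative, not upstream code; `dev' and
+ * `client' are assumed to exist):
+ *
+ *	struct nouveau_bo *bo = NULL;
+ *
+ *	if (nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
+ *			   4096, NULL, &bo) == 0) {
+ *		if (nouveau_bo_map(bo, NOUVEAU_BO_WR, client) == 0)
+ *			memset(bo->map, 0, bo->size);
+ *		nouveau_bo_ref(NULL, &bo);
+ *	}
+ */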
diff --git a/chromium/third_party/libdrm/src/nouveau/nouveau.h b/chromium/third_party/libdrm/src/nouveau/nouveau.h
new file mode 100644
index 00000000000..335ce77dca7
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nouveau.h
@@ -0,0 +1,276 @@
+#ifndef __NOUVEAU_H__
+#define __NOUVEAU_H__
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* Supported class information, provided by the kernel */
+struct nouveau_sclass {
+ int32_t oclass;
+ int minver;
+ int maxver;
+};
+
+/* Client-provided array describing the desired class versions.
+ *
+ * These are matched against the kernel's list of supported classes.
+ */
+struct nouveau_mclass {
+ int32_t oclass;
+ int version;
+ void *data;
+};
+
+struct nouveau_object {
+ struct nouveau_object *parent;
+ uint64_t handle;
+ uint32_t oclass;
+ uint32_t length; /* deprecated */
+ void *data; /* deprecated */
+};
+
+int nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
+ uint32_t oclass, void *data, uint32_t length,
+ struct nouveau_object **);
+void nouveau_object_del(struct nouveau_object **);
+int nouveau_object_mthd(struct nouveau_object *, uint32_t mthd,
+ void *data, uint32_t size);
+int nouveau_object_sclass_get(struct nouveau_object *,
+ struct nouveau_sclass **);
+void nouveau_object_sclass_put(struct nouveau_sclass **);
+int nouveau_object_mclass(struct nouveau_object *,
+ const struct nouveau_mclass *);
+
+struct nouveau_drm {
+ struct nouveau_object client;
+ int fd;
+ uint32_t version;
+ bool nvif;
+};
+
+static inline struct nouveau_drm *
+nouveau_drm(struct nouveau_object *obj)
+{
+ while (obj && obj->parent)
+ obj = obj->parent;
+ return (struct nouveau_drm *)obj;
+}
+
+int nouveau_drm_new(int fd, struct nouveau_drm **);
+void nouveau_drm_del(struct nouveau_drm **);
+
+struct nouveau_device {
+ struct nouveau_object object;
+ int fd; /* deprecated */
+ uint32_t lib_version; /* deprecated */
+ uint32_t drm_version; /* deprecated */
+ uint32_t chipset;
+ uint64_t vram_size;
+ uint64_t gart_size;
+ uint64_t vram_limit;
+ uint64_t gart_limit;
+};
+
+int nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
+ void *data, uint32_t size, struct nouveau_device **);
+void nouveau_device_del(struct nouveau_device **);
+
+int nouveau_getparam(struct nouveau_device *, uint64_t param, uint64_t *value);
+int nouveau_setparam(struct nouveau_device *, uint64_t param, uint64_t value);
+
+/* deprecated */
+int nouveau_device_wrap(int fd, int close, struct nouveau_device **);
+int nouveau_device_open(const char *busid, struct nouveau_device **);
+
+struct nouveau_client {
+ struct nouveau_device *device;
+ int id;
+};
+
+int nouveau_client_new(struct nouveau_device *, struct nouveau_client **);
+void nouveau_client_del(struct nouveau_client **);
+
+union nouveau_bo_config {
+ struct {
+#define NV04_BO_16BPP 0x00000001
+#define NV04_BO_32BPP 0x00000002
+#define NV04_BO_ZETA 0x00000004
+ uint32_t surf_flags;
+ uint32_t surf_pitch;
+ } nv04;
+ struct {
+ uint32_t memtype;
+ uint32_t tile_mode;
+ } nv50;
+ struct {
+ uint32_t memtype;
+ uint32_t tile_mode;
+ } nvc0;
+ uint32_t data[8];
+};
+
+#define NOUVEAU_BO_VRAM 0x00000001
+#define NOUVEAU_BO_GART 0x00000002
+#define NOUVEAU_BO_APER (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)
+#define NOUVEAU_BO_RD 0x00000100
+#define NOUVEAU_BO_WR 0x00000200
+#define NOUVEAU_BO_RDWR (NOUVEAU_BO_RD | NOUVEAU_BO_WR)
+#define NOUVEAU_BO_NOBLOCK 0x00000400
+#define NOUVEAU_BO_LOW 0x00001000
+#define NOUVEAU_BO_HIGH 0x00002000
+#define NOUVEAU_BO_OR 0x00004000
+#define NOUVEAU_BO_MAP 0x80000000
+#define NOUVEAU_BO_CONTIG 0x40000000
+#define NOUVEAU_BO_NOSNOOP 0x20000000
+#define NOUVEAU_BO_COHERENT 0x10000000
+
+struct nouveau_bo {
+ struct nouveau_device *device;
+ uint32_t handle;
+ uint64_t size;
+ uint32_t flags;
+ uint64_t offset;
+ void *map;
+ union nouveau_bo_config config;
+};
+
+int nouveau_bo_new(struct nouveau_device *, uint32_t flags, uint32_t align,
+ uint64_t size, union nouveau_bo_config *,
+ struct nouveau_bo **);
+int nouveau_bo_wrap(struct nouveau_device *, uint32_t handle,
+ struct nouveau_bo **);
+int nouveau_bo_name_ref(struct nouveau_device *v, uint32_t name,
+ struct nouveau_bo **);
+int nouveau_bo_name_get(struct nouveau_bo *, uint32_t *name);
+void nouveau_bo_ref(struct nouveau_bo *, struct nouveau_bo **);
+int nouveau_bo_map(struct nouveau_bo *, uint32_t access,
+ struct nouveau_client *);
+int nouveau_bo_wait(struct nouveau_bo *, uint32_t access,
+ struct nouveau_client *);
+int nouveau_bo_prime_handle_ref(struct nouveau_device *, int prime_fd,
+ struct nouveau_bo **);
+int nouveau_bo_set_prime(struct nouveau_bo *, int *prime_fd);
+
+struct nouveau_list {
+ struct nouveau_list *prev;
+ struct nouveau_list *next;
+};
+
+struct nouveau_bufref {
+ struct nouveau_list thead;
+ struct nouveau_bo *bo;
+ uint32_t packet;
+ uint32_t flags;
+ uint32_t data;
+ uint32_t vor;
+ uint32_t tor;
+ uint32_t priv_data;
+ void *priv;
+};
+
+struct nouveau_bufctx {
+ struct nouveau_client *client;
+ struct nouveau_list head;
+ struct nouveau_list pending;
+ struct nouveau_list current;
+ int relocs;
+};
+
+int nouveau_bufctx_new(struct nouveau_client *, int bins,
+ struct nouveau_bufctx **);
+void nouveau_bufctx_del(struct nouveau_bufctx **);
+struct nouveau_bufref *
+nouveau_bufctx_refn(struct nouveau_bufctx *, int bin,
+ struct nouveau_bo *, uint32_t flags);
+struct nouveau_bufref *
+nouveau_bufctx_mthd(struct nouveau_bufctx *, int bin, uint32_t packet,
+ struct nouveau_bo *, uint64_t data, uint32_t flags,
+ uint32_t vor, uint32_t tor);
+void nouveau_bufctx_reset(struct nouveau_bufctx *, int bin);
+
+struct nouveau_pushbuf_krec;
+struct nouveau_pushbuf {
+ struct nouveau_client *client;
+ struct nouveau_object *channel;
+ struct nouveau_bufctx *bufctx;
+ void (*kick_notify)(struct nouveau_pushbuf *);
+ void *user_priv;
+ uint32_t rsvd_kick;
+ uint32_t flags;
+ uint32_t *cur;
+ uint32_t *end;
+};
+
+struct nouveau_pushbuf_refn {
+ struct nouveau_bo *bo;
+ uint32_t flags;
+};
+
+int nouveau_pushbuf_new(struct nouveau_client *, struct nouveau_object *chan,
+ int nr, uint32_t size, bool immediate,
+ struct nouveau_pushbuf **);
+void nouveau_pushbuf_del(struct nouveau_pushbuf **);
+int nouveau_pushbuf_space(struct nouveau_pushbuf *, uint32_t dwords,
+ uint32_t relocs, uint32_t pushes);
+void nouveau_pushbuf_data(struct nouveau_pushbuf *, struct nouveau_bo *,
+ uint64_t offset, uint64_t length);
+int nouveau_pushbuf_refn(struct nouveau_pushbuf *,
+ struct nouveau_pushbuf_refn *, int nr);
+/* Emits a reloc into the push buffer at the current position. You *must*
+ * have previously added the referenced buffer to a buffer context, and
+ * validated it against the current push buffer.
+ */
+void nouveau_pushbuf_reloc(struct nouveau_pushbuf *, struct nouveau_bo *,
+ uint32_t data, uint32_t flags,
+ uint32_t vor, uint32_t tor);
+int nouveau_pushbuf_validate(struct nouveau_pushbuf *);
+uint32_t nouveau_pushbuf_refd(struct nouveau_pushbuf *, struct nouveau_bo *);
+int nouveau_pushbuf_kick(struct nouveau_pushbuf *, struct nouveau_object *chan);
+struct nouveau_bufctx *
+nouveau_pushbuf_bufctx(struct nouveau_pushbuf *, struct nouveau_bufctx *);
+
+#define NOUVEAU_DEVICE_CLASS 0x80000000
+#define NOUVEAU_FIFO_CHANNEL_CLASS 0x80000001
+#define NOUVEAU_NOTIFIER_CLASS 0x80000002
+
+struct nouveau_fifo {
+ struct nouveau_object *object;
+ uint32_t channel;
+ uint32_t pushbuf;
+ uint64_t unused1[3];
+};
+
+struct nv04_fifo {
+ struct nouveau_fifo base;
+ uint32_t vram;
+ uint32_t gart;
+ uint32_t notify;
+};
+
+struct nvc0_fifo {
+ struct nouveau_fifo base;
+ uint32_t notify;
+};
+
+#define NVE0_FIFO_ENGINE_GR 0x00000001
+#define NVE0_FIFO_ENGINE_VP 0x00000002
+#define NVE0_FIFO_ENGINE_PPP 0x00000004
+#define NVE0_FIFO_ENGINE_BSP 0x00000008
+#define NVE0_FIFO_ENGINE_CE0 0x00000010
+#define NVE0_FIFO_ENGINE_CE1 0x00000020
+#define NVE0_FIFO_ENGINE_ENC 0x00000040
+
+struct nve0_fifo {
+ struct {
+ struct nouveau_fifo base;
+ uint32_t notify;
+ };
+ uint32_t engine;
+};
+
+struct nv04_notify {
+ struct nouveau_object *object;
+ uint32_t offset;
+ uint32_t length;
+};
+#endif
diff --git a/chromium/third_party/libdrm/src/nouveau/nvif/cl0080.h b/chromium/third_party/libdrm/src/nouveau/nvif/cl0080.h
new file mode 100644
index 00000000000..331620a52af
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nvif/cl0080.h
@@ -0,0 +1,45 @@
+#ifndef __NVIF_CL0080_H__
+#define __NVIF_CL0080_H__
+
+struct nv_device_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 device; /* device identifier, ~0 for client default */
+};
+
+#define NV_DEVICE_V0_INFO 0x00
+#define NV_DEVICE_V0_TIME 0x01
+
+struct nv_device_info_v0 {
+ __u8 version;
+#define NV_DEVICE_INFO_V0_IGP 0x00
+#define NV_DEVICE_INFO_V0_PCI 0x01
+#define NV_DEVICE_INFO_V0_AGP 0x02
+#define NV_DEVICE_INFO_V0_PCIE 0x03
+#define NV_DEVICE_INFO_V0_SOC 0x04
+ __u8 platform;
+ __u16 chipset; /* from NV_PMC_BOOT_0 */
+ __u8 revision; /* from NV_PMC_BOOT_0 */
+#define NV_DEVICE_INFO_V0_TNT 0x01
+#define NV_DEVICE_INFO_V0_CELSIUS 0x02
+#define NV_DEVICE_INFO_V0_KELVIN 0x03
+#define NV_DEVICE_INFO_V0_RANKINE 0x04
+#define NV_DEVICE_INFO_V0_CURIE 0x05
+#define NV_DEVICE_INFO_V0_TESLA 0x06
+#define NV_DEVICE_INFO_V0_FERMI 0x07
+#define NV_DEVICE_INFO_V0_KEPLER 0x08
+#define NV_DEVICE_INFO_V0_MAXWELL 0x09
+ __u8 family;
+ __u8 pad06[2];
+ __u64 ram_size;
+ __u64 ram_user;
+ char chip[16];
+ char name[64];
+};
+
+struct nv_device_time_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 time;
+};
+#endif
diff --git a/chromium/third_party/libdrm/src/nouveau/nvif/cl9097.h b/chromium/third_party/libdrm/src/nouveau/nvif/cl9097.h
new file mode 100644
index 00000000000..4057676d298
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nvif/cl9097.h
@@ -0,0 +1,44 @@
+#ifndef __NVIF_CL9097_H__
+#define __NVIF_CL9097_H__
+
+#define FERMI_A_ZBC_COLOR 0x00
+#define FERMI_A_ZBC_DEPTH 0x01
+
+struct fermi_a_zbc_color_v0 {
+ __u8 version;
+#define FERMI_A_ZBC_COLOR_V0_FMT_ZERO 0x01
+#define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE 0x02
+#define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32 0x04
+#define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16 0x08
+#define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16 0x0c
+#define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16 0x10
+#define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16 0x14
+#define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16 0x16
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8 0x18
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8 0x1c
+#define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10 0x20
+#define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10 0x24
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8 0x28
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8 0x2c
+#define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8 0x30
+#define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8 0x34
+#define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8 0x38
+#define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10 0x3c
+#define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11 0x40
+ __u8 format;
+ __u8 index;
+ __u8 pad03[5];
+ __u32 ds[4];
+ __u32 l2[4];
+};
+
+struct fermi_a_zbc_depth_v0 {
+ __u8 version;
+#define FERMI_A_ZBC_DEPTH_V0_FMT_FP32 0x01
+ __u8 format;
+ __u8 index;
+ __u8 pad03[5];
+ __u32 ds;
+ __u32 l2;
+};
+#endif
diff --git a/chromium/third_party/libdrm/src/nouveau/nvif/class.h b/chromium/third_party/libdrm/src/nouveau/nvif/class.h
new file mode 100644
index 00000000000..4179cd65ac0
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nvif/class.h
@@ -0,0 +1,141 @@
+#ifndef __NVIF_CLASS_H__
+#define __NVIF_CLASS_H__
+
+/* these class numbers are made up by us, not nvidia-assigned */
+#define NVIF_CLASS_CONTROL /* if0001.h */ -1
+#define NVIF_CLASS_PERFMON /* if0002.h */ -2
+#define NVIF_CLASS_PERFDOM /* if0003.h */ -3
+#define NVIF_CLASS_SW_NV04 /* if0004.h */ -4
+#define NVIF_CLASS_SW_NV10 /* if0005.h */ -5
+#define NVIF_CLASS_SW_NV50 /* if0005.h */ -6
+#define NVIF_CLASS_SW_GF100 /* if0005.h */ -7
+
+/* the below match nvidia-assigned (either in hw or sw) class numbers */
+#define NV_DEVICE /* cl0080.h */ 0x00000080
+
+#define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002
+#define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003
+#define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d
+
+#define FERMI_TWOD_A 0x0000902d
+
+#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039
+
+#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
+#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
+
+#define NV04_DISP /* cl0046.h */ 0x00000046
+
+#define NV03_CHANNEL_DMA /* cl506b.h */ 0x0000006b
+#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e
+#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e
+#define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e
+#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e
+#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e
+
+#define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f
+#define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f
+#define FERMI_CHANNEL_GPFIFO /* cl906f.h */ 0x0000906f
+#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
+#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
+
+#define NV50_DISP /* cl5070.h */ 0x00005070
+#define G82_DISP /* cl5070.h */ 0x00008270
+#define GT200_DISP /* cl5070.h */ 0x00008370
+#define GT214_DISP /* cl5070.h */ 0x00008570
+#define GT206_DISP /* cl5070.h */ 0x00008870
+#define GF110_DISP /* cl5070.h */ 0x00009070
+#define GK104_DISP /* cl5070.h */ 0x00009170
+#define GK110_DISP /* cl5070.h */ 0x00009270
+#define GM107_DISP /* cl5070.h */ 0x00009470
+#define GM204_DISP /* cl5070.h */ 0x00009570
+
+#define NV31_MPEG 0x00003174
+#define G82_MPEG 0x00008274
+
+#define NV74_VP2 0x00007476
+
+#define NV50_DISP_CURSOR /* cl507a.h */ 0x0000507a
+#define G82_DISP_CURSOR /* cl507a.h */ 0x0000827a
+#define GT214_DISP_CURSOR /* cl507a.h */ 0x0000857a
+#define GF110_DISP_CURSOR /* cl507a.h */ 0x0000907a
+#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
+
+#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
+#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
+#define GT214_DISP_OVERLAY /* cl507b.h */ 0x0000857b
+#define GF110_DISP_OVERLAY /* cl507b.h */ 0x0000907b
+#define GK104_DISP_OVERLAY /* cl507b.h */ 0x0000917b
+
+#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
+#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
+#define GT200_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000837c
+#define GT214_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000857c
+#define GF110_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000907c
+#define GK104_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000917c
+#define GK110_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000927c
+
+#define NV50_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000507d
+#define G82_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000827d
+#define GT200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000837d
+#define GT214_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000857d
+#define GT206_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000887d
+#define GF110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000907d
+#define GK104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000917d
+#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
+#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
+#define GM204_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
+
+#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
+#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
+#define GT200_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000837e
+#define GT214_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000857e
+#define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e
+#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
+
+#define FERMI_A /* cl9097.h */ 0x00009097
+#define FERMI_B /* cl9097.h */ 0x00009197
+#define FERMI_C /* cl9097.h */ 0x00009297
+
+#define KEPLER_A /* cl9097.h */ 0x0000a097
+#define KEPLER_B /* cl9097.h */ 0x0000a197
+#define KEPLER_C /* cl9097.h */ 0x0000a297
+
+#define MAXWELL_A /* cl9097.h */ 0x0000b097
+#define MAXWELL_B /* cl9097.h */ 0x0000b197
+
+#define NV74_BSP 0x000074b0
+
+#define GT212_MSVLD 0x000085b1
+#define IGT21A_MSVLD 0x000086b1
+#define G98_MSVLD 0x000088b1
+#define GF100_MSVLD 0x000090b1
+#define GK104_MSVLD 0x000095b1
+
+#define GT212_MSPDEC 0x000085b2
+#define G98_MSPDEC 0x000088b2
+#define GF100_MSPDEC 0x000090b2
+#define GK104_MSPDEC 0x000095b2
+
+#define GT212_MSPPP 0x000085b3
+#define G98_MSPPP 0x000088b3
+#define GF100_MSPPP 0x000090b3
+
+#define G98_SEC 0x000088b4
+
+#define GT212_DMA 0x000085b5
+#define FERMI_DMA 0x000090b5
+#define KEPLER_DMA_COPY_A 0x0000a0b5
+#define MAXWELL_DMA_COPY_A 0x0000b0b5
+
+#define FERMI_DECOMPRESS 0x000090b8
+
+#define FERMI_COMPUTE_A 0x000090c0
+#define FERMI_COMPUTE_B 0x000091c0
+#define KEPLER_COMPUTE_A 0x0000a0c0
+#define KEPLER_COMPUTE_B 0x0000a1c0
+#define MAXWELL_COMPUTE_A 0x0000b0c0
+#define MAXWELL_COMPUTE_B 0x0000b1c0
+
+#define NV74_CIPHER 0x000074c1
+#endif
diff --git a/chromium/third_party/libdrm/src/nouveau/nvif/if0002.h b/chromium/third_party/libdrm/src/nouveau/nvif/if0002.h
new file mode 100644
index 00000000000..c04c91d0b81
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nvif/if0002.h
@@ -0,0 +1,38 @@
+#ifndef __NVIF_IF0002_H__
+#define __NVIF_IF0002_H__
+
+#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
+#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
+#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
+
+struct nvif_perfmon_query_domain_v0 {
+ __u8 version;
+ __u8 id;
+ __u8 counter_nr;
+ __u8 iter;
+ __u16 signal_nr;
+ __u8 pad05[2];
+ char name[64];
+};
+
+struct nvif_perfmon_query_signal_v0 {
+ __u8 version;
+ __u8 domain;
+ __u16 iter;
+ __u8 signal;
+ __u8 source_nr;
+ __u8 pad05[2];
+ char name[64];
+};
+
+struct nvif_perfmon_query_source_v0 {
+ __u8 version;
+ __u8 domain;
+ __u8 signal;
+ __u8 iter;
+ __u8 pad04[4];
+ __u32 source;
+ __u32 mask;
+ char name[64];
+};
+#endif
diff --git a/chromium/third_party/libdrm/src/nouveau/nvif/if0003.h b/chromium/third_party/libdrm/src/nouveau/nvif/if0003.h
new file mode 100644
index 00000000000..0cd03efb80a
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nvif/if0003.h
@@ -0,0 +1,33 @@
+#ifndef __NVIF_IF0003_H__
+#define __NVIF_IF0003_H__
+
+struct nvif_perfdom_v0 {
+ __u8 version;
+ __u8 domain;
+ __u8 mode;
+ __u8 pad03[1];
+ struct {
+ __u8 signal[4];
+ __u64 source[4][8];
+ __u16 logic_op;
+ } ctr[4];
+};
+
+#define NVIF_PERFDOM_V0_INIT 0x00
+#define NVIF_PERFDOM_V0_SAMPLE 0x01
+#define NVIF_PERFDOM_V0_READ 0x02
+
+struct nvif_perfdom_init {
+};
+
+struct nvif_perfdom_sample {
+};
+
+struct nvif_perfdom_read_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u32 ctr[4];
+ __u32 clk;
+ __u8 pad04[4];
+};
+#endif
diff --git a/chromium/third_party/libdrm/src/nouveau/nvif/ioctl.h b/chromium/third_party/libdrm/src/nouveau/nvif/ioctl.h
new file mode 100644
index 00000000000..c5f5eb83a59
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nvif/ioctl.h
@@ -0,0 +1,132 @@
+#ifndef __NVIF_IOCTL_H__
+#define __NVIF_IOCTL_H__
+
+#define NVIF_VERSION_LATEST 0x0000000000000000ULL
+
+struct nvif_ioctl_v0 {
+ __u8 version;
+#define NVIF_IOCTL_V0_NOP 0x00
+#define NVIF_IOCTL_V0_SCLASS 0x01
+#define NVIF_IOCTL_V0_NEW 0x02
+#define NVIF_IOCTL_V0_DEL 0x03
+#define NVIF_IOCTL_V0_MTHD 0x04
+#define NVIF_IOCTL_V0_RD 0x05
+#define NVIF_IOCTL_V0_WR 0x06
+#define NVIF_IOCTL_V0_MAP 0x07
+#define NVIF_IOCTL_V0_UNMAP 0x08
+#define NVIF_IOCTL_V0_NTFY_NEW 0x09
+#define NVIF_IOCTL_V0_NTFY_DEL 0x0a
+#define NVIF_IOCTL_V0_NTFY_GET 0x0b
+#define NVIF_IOCTL_V0_NTFY_PUT 0x0c
+ __u8 type;
+ __u8 pad02[4];
+#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
+#define NVIF_IOCTL_V0_OWNER_ANY 0xff
+ __u8 owner;
+#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
+#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
+ __u8 route;
+ __u64 token;
+ __u64 object;
+ __u8 data[]; /* ioctl data (below) */
+};
+
+struct nvif_ioctl_nop_v0 {
+ __u64 version;
+};
+
+struct nvif_ioctl_sclass_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 count;
+ __u8 pad02[6];
+ struct nvif_ioctl_sclass_oclass_v0 {
+ __s32 oclass;
+ __s16 minver;
+ __s16 maxver;
+ } oclass[];
+};
+
+struct nvif_ioctl_new_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 pad01[6];
+ __u8 route;
+ __u64 token;
+ __u64 object;
+ __u32 handle;
+ __s32 oclass;
+ __u8 data[]; /* class data (class.h) */
+};
+
+struct nvif_ioctl_del {
+};
+
+struct nvif_ioctl_rd_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 size;
+ __u8 pad02[2];
+ __u32 data;
+ __u64 addr;
+};
+
+struct nvif_ioctl_wr_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 size;
+ __u8 pad02[2];
+ __u32 data;
+ __u64 addr;
+};
+
+struct nvif_ioctl_map_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 pad01[3];
+ __u32 length;
+ __u64 handle;
+};
+
+struct nvif_ioctl_unmap {
+};
+
+struct nvif_ioctl_ntfy_new_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 event;
+ __u8 index;
+ __u8 pad03[5];
+ __u8 data[]; /* event request data (event.h) */
+};
+
+struct nvif_ioctl_ntfy_del_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_ntfy_get_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_ntfy_put_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_mthd_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 method;
+ __u8 pad02[6];
+ __u8 data[]; /* method data (class.h) */
+};
+
+#endif
diff --git a/chromium/third_party/libdrm/src/nouveau/nvif/unpack.h b/chromium/third_party/libdrm/src/nouveau/nvif/unpack.h
new file mode 100644
index 00000000000..751bcf4930a
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/nvif/unpack.h
@@ -0,0 +1,28 @@
+#ifndef __NVIF_UNPACK_H__
+#define __NVIF_UNPACK_H__
+
+#define nvif_unvers(r,d,s,m) ({ \
+ void **_data = (d); __u32 *_size = (s); int _ret = (r); \
+ if (_ret == -ENOSYS && *_size == sizeof(m)) { \
+ *_data = NULL; \
+ *_size = _ret = 0; \
+ } \
+ _ret; \
+})
+
+#define nvif_unpack(r,d,s,m,vl,vh,x) ({ \
+ void **_data = (d); __u32 *_size = (s); \
+ int _ret = (r), _vl = (vl), _vh = (vh); \
+ if (_ret == -ENOSYS && *_size >= sizeof(m) && \
+ (m).version >= _vl && (m).version <= _vh) { \
+ *_data = (__u8 *)*_data + sizeof(m); \
+ *_size = *_size - sizeof(m); \
+ if (_ret = 0, !(x)) { \
+ _ret = *_size ? -E2BIG : 0; \
+ *_data = NULL; \
+ *_size = 0; \
+ } \
+ } \
+ _ret; \
+})
+#endif
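+
+/* Usage note (illustrative, not upstream documentation): callers seed ret
+ * with -ENOSYS and branch on the macro's value, as nouveau_device_new()
+ * does:
+ *
+ *	int ret = -ENOSYS;
+ *	if (nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))
+ *		return ret;
+ *
+ * On a version match the v0 header is stripped off data/size and the
+ * expression evaluates to 0; with false for (x), any trailing bytes make
+ * it evaluate to -E2BIG instead.
+ */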
diff --git a/chromium/third_party/libdrm/src/nouveau/private.h b/chromium/third_party/libdrm/src/nouveau/private.h
new file mode 100644
index 00000000000..83060f96524
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/private.h
@@ -0,0 +1,124 @@
+#ifndef __NOUVEAU_LIBDRM_PRIVATE_H__
+#define __NOUVEAU_LIBDRM_PRIVATE_H__
+
+#include <libdrm_macros.h>
+#include <xf86drm.h>
+#include <xf86atomic.h>
+#include <pthread.h>
+#include "nouveau_drm.h"
+
+#include "nouveau.h"
+
+#ifdef DEBUG
+drm_private uint32_t nouveau_debug;
+#define dbg_on(lvl) (nouveau_debug & (1 << lvl))
+#define dbg(lvl, fmt, args...) do { \
+ if (dbg_on((lvl))) \
+ fprintf(stderr, "nouveau: "fmt, ##args); \
+} while(0)
+#else
+#define dbg_on(lvl) (0)
+#define dbg(lvl, fmt, args...)
+#endif
+#define err(fmt, args...) fprintf(stderr, "nouveau: "fmt, ##args)
+
+struct nouveau_client_kref {
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ struct nouveau_pushbuf *push;
+};
+
+struct nouveau_client_priv {
+ struct nouveau_client base;
+ struct nouveau_client_kref *kref;
+ unsigned kref_nr;
+};
+
+static inline struct nouveau_client_priv *
+nouveau_client(struct nouveau_client *client)
+{
+ return (struct nouveau_client_priv *)client;
+}
+
+static inline struct drm_nouveau_gem_pushbuf_bo *
+cli_kref_get(struct nouveau_client *client, struct nouveau_bo *bo)
+{
+ struct nouveau_client_priv *pcli = nouveau_client(client);
+ struct drm_nouveau_gem_pushbuf_bo *kref = NULL;
+ if (pcli->kref_nr > bo->handle)
+ kref = pcli->kref[bo->handle].kref;
+ return kref;
+}
+
+static inline struct nouveau_pushbuf *
+cli_push_get(struct nouveau_client *client, struct nouveau_bo *bo)
+{
+ struct nouveau_client_priv *pcli = nouveau_client(client);
+ struct nouveau_pushbuf *push = NULL;
+ if (pcli->kref_nr > bo->handle)
+ push = pcli->kref[bo->handle].push;
+ return push;
+}
+
+static inline void
+cli_kref_set(struct nouveau_client *client, struct nouveau_bo *bo,
+ struct drm_nouveau_gem_pushbuf_bo *kref,
+ struct nouveau_pushbuf *push)
+{
+ struct nouveau_client_priv *pcli = nouveau_client(client);
+ if (pcli->kref_nr <= bo->handle) {
+ pcli->kref = realloc(pcli->kref,
+ sizeof(*pcli->kref) * bo->handle * 2);
+ while (pcli->kref_nr < bo->handle * 2) {
+ pcli->kref[pcli->kref_nr].kref = NULL;
+ pcli->kref[pcli->kref_nr].push = NULL;
+ pcli->kref_nr++;
+ }
+ }
+ pcli->kref[bo->handle].kref = kref;
+ pcli->kref[bo->handle].push = push;
+}
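+
+/* A note on the growth scheme above (illustrative, not upstream text):
+ * the kref array is indexed directly by GEM handle and grows by doubling,
+ * e.g. the first bo seen with handle 5 reallocates the array to 10
+ * entries (indices 0..9), NULL-initialising every new slot first.
+ */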
+
+struct nouveau_bo_priv {
+ struct nouveau_bo base;
+ struct nouveau_list head;
+ atomic_t refcnt;
+ uint64_t map_handle;
+ uint32_t name;
+ uint32_t access;
+};
+
+static inline struct nouveau_bo_priv *
+nouveau_bo(struct nouveau_bo *bo)
+{
+ return (struct nouveau_bo_priv *)bo;
+}
+
+struct nouveau_device_priv {
+ struct nouveau_device base;
+ int close;
+ pthread_mutex_t lock;
+ struct nouveau_list bo_list;
+ uint32_t *client;
+ int nr_client;
+ bool have_bo_usage;
+ int gart_limit_percent, vram_limit_percent;
+};
+
+static inline struct nouveau_device_priv *
+nouveau_device(struct nouveau_device *dev)
+{
+ return (struct nouveau_device_priv *)dev;
+}
+
+int
+nouveau_device_open_existing(struct nouveau_device **, int, int, drm_context_t);
+
+/* abi16.c */
+drm_private bool abi16_object(struct nouveau_object *, int (**)(struct nouveau_object *));
+drm_private void abi16_delete(struct nouveau_object *);
+drm_private int abi16_sclass(struct nouveau_object *, struct nouveau_sclass **);
+drm_private void abi16_bo_info(struct nouveau_bo *, struct drm_nouveau_gem_info *);
+drm_private int abi16_bo_init(struct nouveau_bo *, uint32_t alignment,
+ union nouveau_bo_config *);
+
+#endif
diff --git a/chromium/third_party/libdrm/src/nouveau/pushbuf.c b/chromium/third_party/libdrm/src/nouveau/pushbuf.c
new file mode 100644
index 00000000000..035e3019f2c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/nouveau/pushbuf.c
@@ -0,0 +1,781 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <xf86drm.h>
+#include <xf86atomic.h>
+#include "libdrm_lists.h"
+#include "nouveau_drm.h"
+
+#include "nouveau.h"
+#include "private.h"
+
+struct nouveau_pushbuf_krec {
+ struct nouveau_pushbuf_krec *next;
+ struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
+ struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
+ struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
+ int nr_buffer;
+ int nr_reloc;
+ int nr_push;
+ uint64_t vram_used;
+ uint64_t gart_used;
+};
+
+struct nouveau_pushbuf_priv {
+ struct nouveau_pushbuf base;
+ struct nouveau_pushbuf_krec *list;
+ struct nouveau_pushbuf_krec *krec;
+ struct nouveau_list bctx_list;
+ struct nouveau_bo *bo;
+ uint32_t type;
+ uint32_t suffix0;
+ uint32_t suffix1;
+ uint32_t *ptr;
+ uint32_t *bgn;
+ int bo_next;
+ int bo_nr;
+ struct nouveau_bo *bos[];
+};
+
+static inline struct nouveau_pushbuf_priv *
+nouveau_pushbuf(struct nouveau_pushbuf *push)
+{
+ return (struct nouveau_pushbuf_priv *)push;
+}
+
+static int pushbuf_validate(struct nouveau_pushbuf *, bool);
+static int pushbuf_flush(struct nouveau_pushbuf *);
+
+static bool
+pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
+ uint32_t *domains)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->krec;
+ struct nouveau_device *dev = push->client->device;
+ struct nouveau_bo *kbo;
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ int i;
+
+ /* VRAM is the only valid domain. GART and VRAM|GART buffers
+ * are all accounted to GART, so if this doesn't fit in VRAM
+ * straight up, a flush is needed.
+ */
+ if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
+ if (krec->vram_used + bo->size > dev->vram_limit)
+ return false;
+ krec->vram_used += bo->size;
+ return true;
+ }
+
+ /* GART or VRAM|GART buffer. Account both of these buffer types
+ * to GART only for the moment, which simplifies things. If the
+ * buffer can fit already, we're done here.
+ */
+ if (krec->gart_used + bo->size <= dev->gart_limit) {
+ krec->gart_used += bo->size;
+ return true;
+ }
+
+	/* Ran out of GART space; if it's a VRAM|GART buffer and it'll
+ * fit into available VRAM, turn it into a VRAM buffer
+ */
+ if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ krec->vram_used + bo->size <= dev->vram_limit) {
+ *domains &= NOUVEAU_GEM_DOMAIN_VRAM;
+ krec->vram_used += bo->size;
+ return true;
+ }
+
+	/* Still couldn't fit the buffer anywhere, so as a last resort
+ * scan the buffer list for VRAM|GART buffers and turn them into
+ * VRAM buffers until we have enough space in GART for this one
+ */
+ kref = krec->buffer;
+ for (i = 0; i < krec->nr_buffer; i++, kref++) {
+ if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
+ continue;
+
+ kbo = (void *)(unsigned long)kref->user_priv;
+ if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ krec->vram_used + kbo->size > dev->vram_limit)
+ continue;
+
+ kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
+ krec->gart_used -= kbo->size;
+ krec->vram_used += kbo->size;
+ if (krec->gart_used + bo->size <= dev->gart_limit) {
+ krec->gart_used += bo->size;
+ return true;
+ }
+ }
+
+ /* Couldn't resolve a placement, need to force a flush */
+ return false;
+}
+
+static struct drm_nouveau_gem_pushbuf_bo *
+pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
+ uint32_t flags)
+{
+ struct nouveau_device *dev = push->client->device;
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->krec;
+ struct nouveau_pushbuf *fpush;
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ uint32_t domains, domains_wr, domains_rd;
+
+ domains = 0;
+ if (flags & NOUVEAU_BO_VRAM)
+ domains |= NOUVEAU_GEM_DOMAIN_VRAM;
+ if (flags & NOUVEAU_BO_GART)
+ domains |= NOUVEAU_GEM_DOMAIN_GART;
+ domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
+ domains_rd = domains * !!(flags & NOUVEAU_BO_RD);
+
+ /* if buffer is referenced on another pushbuf that is owned by the
+ * same client, we need to flush the other pushbuf first to ensure
+ * the correct ordering of commands
+ */
+ fpush = cli_push_get(push->client, bo);
+ if (fpush && fpush != push)
+ pushbuf_flush(fpush);
+
+ kref = cli_kref_get(push->client, bo);
+ if (kref) {
+ /* possible conflict in memory types - flush and retry */
+ if (!(kref->valid_domains & domains))
+ return NULL;
+
+ /* VRAM|GART buffer turning into a VRAM buffer. Make sure
+ * it'll fit in VRAM and force a flush if not.
+ */
+ if ((kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+ ( domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
+ if (krec->vram_used + bo->size > dev->vram_limit)
+ return NULL;
+ krec->vram_used += bo->size;
+ krec->gart_used -= bo->size;
+ }
+
+ kref->valid_domains &= domains;
+ kref->write_domains |= domains_wr;
+ kref->read_domains |= domains_rd;
+ } else {
+ if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
+ !pushbuf_kref_fits(push, bo, &domains))
+ return NULL;
+
+ kref = &krec->buffer[krec->nr_buffer++];
+ kref->user_priv = (unsigned long)bo;
+ kref->handle = bo->handle;
+ kref->valid_domains = domains;
+ kref->write_domains = domains_wr;
+ kref->read_domains = domains_rd;
+ kref->presumed.valid = 1;
+ kref->presumed.offset = bo->offset;
+ if (bo->flags & NOUVEAU_BO_VRAM)
+ kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ else
+ kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
+
+ cli_kref_set(push->client, bo, kref, push);
+ atomic_inc(&nouveau_bo(bo)->refcnt);
+ }
+
+ return kref;
+}
+
+static uint32_t
+pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
+ uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->krec;
+ struct drm_nouveau_gem_pushbuf_reloc *krel;
+ struct drm_nouveau_gem_pushbuf_bo *pkref;
+ struct drm_nouveau_gem_pushbuf_bo *bkref;
+ uint32_t reloc = data;
+
+ pkref = cli_kref_get(push->client, nvpb->bo);
+ bkref = cli_kref_get(push->client, bo);
+ krel = &krec->reloc[krec->nr_reloc++];
+
+ assert(pkref);
+ assert(bkref);
+ krel->reloc_bo_index = pkref - krec->buffer;
+ krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
+ krel->bo_index = bkref - krec->buffer;
+ krel->flags = 0;
+ krel->data = data;
+ krel->vor = vor;
+ krel->tor = tor;
+
+ if (flags & NOUVEAU_BO_LOW) {
+ reloc = (bkref->presumed.offset + data);
+ krel->flags |= NOUVEAU_GEM_RELOC_LOW;
+ } else
+ if (flags & NOUVEAU_BO_HIGH) {
+ reloc = (bkref->presumed.offset + data) >> 32;
+ krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
+ }
+ if (flags & NOUVEAU_BO_OR) {
+ if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ reloc |= vor;
+ else
+ reloc |= tor;
+ krel->flags |= NOUVEAU_GEM_RELOC_OR;
+ }
+
+ return reloc;
+}
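+
+/* Worked example of the LOW/HIGH split above (illustrative, not upstream
+ * text): with a presumed offset of 0x123456780 and data 0x10,
+ *
+ *	NOUVEAU_BO_LOW  -> (0x123456780 + 0x10)       = 0x23456790 (low 32)
+ *	NOUVEAU_BO_HIGH -> (0x123456780 + 0x10) >> 32 = 0x1
+ *
+ * and NOUVEAU_BO_OR then folds in vor or tor, depending on whether the
+ * kernel placed the buffer in VRAM or GART.
+ */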
+
+static void
+pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
+{
+ struct drm_nouveau_gem_pushbuf_reloc *krel;
+ struct drm_nouveau_gem_pushbuf_push *kpsh;
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ struct nouveau_bo *bo;
+ uint32_t *bgn, *end;
+ int i;
+
+ err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
+ krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);
+
+ kref = krec->buffer;
+ for (i = 0; i < krec->nr_buffer; i++, kref++) {
+ err("ch%d: buf %08x %08x %08x %08x %08x\n", chid, i,
+ kref->handle, kref->valid_domains,
+ kref->read_domains, kref->write_domains);
+ }
+
+ krel = krec->reloc;
+ for (i = 0; i < krec->nr_reloc; i++, krel++) {
+ err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
+ chid, krel->reloc_bo_index, krel->reloc_bo_offset,
+ krel->bo_index, krel->flags, krel->data,
+ krel->vor, krel->tor);
+ }
+
+ kpsh = krec->push;
+ for (i = 0; i < krec->nr_push; i++, kpsh++) {
+ kref = krec->buffer + kpsh->bo_index;
+ bo = (void *)(unsigned long)kref->user_priv;
+ bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
+		end = bgn + (kpsh->length / 4);
+
+ err("ch%d: psh %08x %010llx %010llx\n", chid, kpsh->bo_index,
+ (unsigned long long)kpsh->offset,
+ (unsigned long long)(kpsh->offset + kpsh->length));
+ while (bgn < end)
+ err("\t0x%08x\n", *bgn++);
+ }
+}
+
+static int
+pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->list;
+ struct nouveau_device *dev = push->client->device;
+ struct nouveau_drm *drm = nouveau_drm(&dev->object);
+ struct drm_nouveau_gem_pushbuf_bo_presumed *info;
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ struct drm_nouveau_gem_pushbuf req;
+ struct nouveau_fifo *fifo = chan->data;
+ struct nouveau_bo *bo;
+ int krec_id = 0;
+ int ret = 0, i;
+
+ if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
+ return -EINVAL;
+
+ if (push->kick_notify)
+ push->kick_notify(push);
+
+ nouveau_pushbuf_data(push, NULL, 0, 0);
+
+ while (krec && krec->nr_push) {
+ req.channel = fifo->channel;
+ req.nr_buffers = krec->nr_buffer;
+ req.buffers = (uint64_t)(unsigned long)krec->buffer;
+ req.nr_relocs = krec->nr_reloc;
+ req.nr_push = krec->nr_push;
+ req.relocs = (uint64_t)(unsigned long)krec->reloc;
+ req.push = (uint64_t)(unsigned long)krec->push;
+ req.suffix0 = nvpb->suffix0;
+ req.suffix1 = nvpb->suffix1;
+ req.vram_available = 0; /* for valgrind */
+ req.gart_available = 0;
+
+ if (dbg_on(0))
+ pushbuf_dump(krec, krec_id++, fifo->channel);
+
+#ifndef SIMULATE
+ ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
+ &req, sizeof(req));
+ nvpb->suffix0 = req.suffix0;
+ nvpb->suffix1 = req.suffix1;
+ dev->vram_limit = (req.vram_available *
+ nouveau_device(dev)->vram_limit_percent) / 100;
+ dev->gart_limit = (req.gart_available *
+ nouveau_device(dev)->gart_limit_percent) / 100;
+#else
+ if (dbg_on(31))
+ ret = -EINVAL;
+#endif
+
+ if (ret) {
+ err("kernel rejected pushbuf: %s\n", strerror(-ret));
+ pushbuf_dump(krec, krec_id++, fifo->channel);
+ break;
+ }
+
+ kref = krec->buffer;
+ for (i = 0; i < krec->nr_buffer; i++, kref++) {
+ bo = (void *)(unsigned long)kref->user_priv;
+
+ info = &kref->presumed;
+ if (!info->valid) {
+ bo->flags &= ~NOUVEAU_BO_APER;
+ if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
+ bo->flags |= NOUVEAU_BO_VRAM;
+ else
+ bo->flags |= NOUVEAU_BO_GART;
+ bo->offset = info->offset;
+ }
+
+ if (kref->write_domains)
+ nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
+ if (kref->read_domains)
+ nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
+ }
+
+ krec = krec->next;
+ }
+
+ return ret;
+}
+
+static int
+pushbuf_flush(struct nouveau_pushbuf *push)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->krec;
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ struct nouveau_bufctx *bctx, *btmp;
+ struct nouveau_bo *bo;
+ int ret = 0, i;
+
+ if (push->channel) {
+ ret = pushbuf_submit(push, push->channel);
+ } else {
+ nouveau_pushbuf_data(push, NULL, 0, 0);
+ krec->next = malloc(sizeof(*krec));
+ nvpb->krec = krec->next;
+ }
+
+ kref = krec->buffer;
+ for (i = 0; i < krec->nr_buffer; i++, kref++) {
+ bo = (void *)(unsigned long)kref->user_priv;
+ cli_kref_set(push->client, bo, NULL, NULL);
+ if (push->channel)
+ nouveau_bo_ref(NULL, &bo);
+ }
+
+ krec = nvpb->krec;
+ krec->vram_used = 0;
+ krec->gart_used = 0;
+ krec->nr_buffer = 0;
+ krec->nr_reloc = 0;
+ krec->nr_push = 0;
+
+ DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
+ DRMLISTJOIN(&bctx->current, &bctx->pending);
+ DRMINITLISTHEAD(&bctx->current);
+ DRMLISTDELINIT(&bctx->head);
+ }
+
+ return ret;
+}
+
+static void
+pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->krec;
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+
+ kref = krec->buffer + sref;
+ while (krec->nr_buffer-- > sref) {
+ struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
+ cli_kref_set(push->client, bo, NULL, NULL);
+ nouveau_bo_ref(NULL, &bo);
+ kref++;
+ }
+ krec->nr_buffer = sref;
+ krec->nr_reloc = srel;
+}
+
+static int
+pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
+ struct nouveau_pushbuf_refn *refs, int nr)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->krec;
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ int sref = krec->nr_buffer;
+ int ret = 0, i;
+
+ for (i = 0; i < nr; i++) {
+ kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
+ if (!kref) {
+ ret = -ENOSPC;
+ break;
+ }
+ }
+
+ if (ret) {
+ pushbuf_refn_fail(push, sref, krec->nr_reloc);
+ if (retry) {
+ pushbuf_flush(push);
+ nouveau_pushbuf_space(push, 0, 0, 0);
+ return pushbuf_refn(push, false, refs, nr);
+ }
+ }
+
+ return ret;
+}
+
+static int
+pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->krec;
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ struct nouveau_bufctx *bctx = push->bufctx;
+ struct nouveau_bufref *bref;
+	int relocs = bctx ? bctx->relocs * 2 : 0;
+ int sref, srel, ret;
+
+ ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
+ if (ret || bctx == NULL)
+ return ret;
+
+ sref = krec->nr_buffer;
+ srel = krec->nr_reloc;
+
+ DRMLISTDEL(&bctx->head);
+ DRMLISTADD(&bctx->head, &nvpb->bctx_list);
+
+ DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
+ kref = pushbuf_kref(push, bref->bo, bref->flags);
+ if (!kref) {
+ ret = -ENOSPC;
+ break;
+ }
+
+ if (bref->packet) {
+ pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
+ *push->cur++ = 0;
+ pushbuf_krel(push, bref->bo, bref->data, bref->flags,
+ bref->vor, bref->tor);
+ *push->cur++ = 0;
+ }
+ }
+
+ DRMLISTJOIN(&bctx->pending, &bctx->current);
+ DRMINITLISTHEAD(&bctx->pending);
+
+ if (ret) {
+ pushbuf_refn_fail(push, sref, srel);
+ if (retry) {
+ pushbuf_flush(push);
+ return pushbuf_validate(push, false);
+ }
+ }
+
+ return ret;
+}
+
+int
+nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
+ int nr, uint32_t size, bool immediate,
+ struct nouveau_pushbuf **ppush)
+{
+ struct nouveau_drm *drm = nouveau_drm(&client->device->object);
+ struct nouveau_fifo *fifo = chan->data;
+ struct nouveau_pushbuf_priv *nvpb;
+ struct nouveau_pushbuf *push;
+ struct drm_nouveau_gem_pushbuf req = {};
+ int ret;
+
+ if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
+ return -EINVAL;
+
+	/* nop pushbuf call, to fetch the current "return to main" sequence
+	 * that we need to append to the pushbuf on early chipsets
+ */
+ req.channel = fifo->channel;
+ req.nr_push = 0;
+ ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
+ &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
+ if (!nvpb)
+ return -ENOMEM;
+
+#ifndef SIMULATE
+ nvpb->suffix0 = req.suffix0;
+ nvpb->suffix1 = req.suffix1;
+#else
+ nvpb->suffix0 = 0xffffffff;
+ nvpb->suffix1 = 0xffffffff;
+#endif
+ nvpb->krec = calloc(1, sizeof(*nvpb->krec));
+ nvpb->list = nvpb->krec;
+ if (!nvpb->krec) {
+ free(nvpb);
+ return -ENOMEM;
+ }
+
+ push = &nvpb->base;
+ push->client = client;
+ push->channel = immediate ? chan : NULL;
+ push->flags = NOUVEAU_BO_RD;
+ if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
+ push->flags |= NOUVEAU_BO_GART;
+ nvpb->type = NOUVEAU_BO_GART;
+ } else
+ if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
+ push->flags |= NOUVEAU_BO_VRAM;
+ nvpb->type = NOUVEAU_BO_VRAM;
+ }
+ nvpb->type |= NOUVEAU_BO_MAP;
+
+ for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
+ ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
+ NULL, &nvpb->bos[nvpb->bo_nr]);
+ if (ret) {
+ nouveau_pushbuf_del(&push);
+ return ret;
+ }
+ }
+
+ DRMINITLISTHEAD(&nvpb->bctx_list);
+ *ppush = push;
+ return 0;
+}
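+
+/* A note on `immediate' (illustrative, not upstream text): with
+ * immediate == true the pushbuf is bound to chan and every flush submits
+ * straight away; with immediate == false, flushed work is queued on kick
+ * records and nothing reaches the kernel until nouveau_pushbuf_kick()
+ * names a channel.
+ */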
+
+void
+nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
+ if (nvpb) {
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ struct nouveau_pushbuf_krec *krec;
+ while ((krec = nvpb->list)) {
+ kref = krec->buffer;
+ while (krec->nr_buffer--) {
+ unsigned long priv = kref++->user_priv;
+ struct nouveau_bo *bo = (void *)priv;
+ cli_kref_set(nvpb->base.client, bo, NULL, NULL);
+ nouveau_bo_ref(NULL, &bo);
+ }
+ nvpb->list = krec->next;
+ free(krec);
+ }
+ while (nvpb->bo_nr--)
+ nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
+ nouveau_bo_ref(NULL, &nvpb->bo);
+ free(nvpb);
+ }
+ *ppush = NULL;
+}
+
+struct nouveau_bufctx *
+nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
+{
+ struct nouveau_bufctx *prev = push->bufctx;
+ push->bufctx = ctx;
+ return prev;
+}
+
+int
+nouveau_pushbuf_space(struct nouveau_pushbuf *push,
+ uint32_t dwords, uint32_t relocs, uint32_t pushes)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->krec;
+ struct nouveau_client *client = push->client;
+ struct nouveau_bo *bo = NULL;
+ bool flushed = false;
+ int ret = 0;
+
+ /* switch to next buffer if insufficient space in the current one */
+ if (push->cur + dwords >= push->end) {
+ if (nvpb->bo_next < nvpb->bo_nr) {
+ nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
+ if (nvpb->bo_next == nvpb->bo_nr && push->channel)
+ nvpb->bo_next = 0;
+ } else {
+ ret = nouveau_bo_new(client->device, nvpb->type, 0,
+ nvpb->bos[0]->size, NULL, &bo);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* make sure there's always enough space to queue up the pending
+ * data in the pushbuf proper
+ */
+ pushes++;
+
+ /* need to flush if we've run out of space on an immediate pushbuf,
+ * if the new buffer won't fit, or if the kernel push/reloc limits
+ * have been hit
+ */
+ if ((bo && ( push->channel ||
+ !pushbuf_kref(push, bo, push->flags))) ||
+ krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
+ krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
+ if (nvpb->bo && krec->nr_buffer)
+ pushbuf_flush(push);
+ flushed = true;
+ }
+
+ /* if necessary, switch to new buffer */
+ if (bo) {
+ ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
+ if (ret)
+ return ret;
+
+ nouveau_pushbuf_data(push, NULL, 0, 0);
+ nouveau_bo_ref(bo, &nvpb->bo);
+ nouveau_bo_ref(NULL, &bo);
+
+ nvpb->bgn = nvpb->bo->map;
+ nvpb->ptr = nvpb->bgn;
+ push->cur = nvpb->bgn;
+ push->end = push->cur + (nvpb->bo->size / 4);
+ push->end -= 2 + push->rsvd_kick; /* space for suffix */
+ }
+
+ pushbuf_kref(push, nvpb->bo, push->flags);
+ return flushed ? pushbuf_validate(push, false) : 0;
+}
+
+void
+nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
+ uint64_t offset, uint64_t length)
+{
+ struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
+ struct nouveau_pushbuf_krec *krec = nvpb->krec;
+ struct drm_nouveau_gem_pushbuf_push *kpsh;
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+
+ if (bo != nvpb->bo && nvpb->bgn != push->cur) {
+ if (nvpb->suffix0 || nvpb->suffix1) {
+ *push->cur++ = nvpb->suffix0;
+ *push->cur++ = nvpb->suffix1;
+ }
+
+ nouveau_pushbuf_data(push, nvpb->bo,
+ (nvpb->bgn - nvpb->ptr) * 4,
+ (push->cur - nvpb->bgn) * 4);
+ nvpb->bgn = push->cur;
+ }
+
+ if (bo) {
+ kref = cli_kref_get(push->client, bo);
+ assert(kref);
+ kpsh = &krec->push[krec->nr_push++];
+ kpsh->bo_index = kref - krec->buffer;
+ kpsh->offset = offset;
+ kpsh->length = length;
+ }
+}
+
+int
+nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
+ struct nouveau_pushbuf_refn *refs, int nr)
+{
+ return pushbuf_refn(push, true, refs, nr);
+}
+
+void
+nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
+ uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
+{
+ *push->cur = pushbuf_krel(push, bo, data, flags, vor, tor);
+ push->cur++;
+}
+
+int
+nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
+{
+ return pushbuf_validate(push, true);
+}
+
+uint32_t
+nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
+{
+ struct drm_nouveau_gem_pushbuf_bo *kref;
+ uint32_t flags = 0;
+
+ if (cli_push_get(push->client, bo) == push) {
+ kref = cli_kref_get(push->client, bo);
+ assert(kref);
+ if (kref->read_domains)
+ flags |= NOUVEAU_BO_RD;
+ if (kref->write_domains)
+ flags |= NOUVEAU_BO_WR;
+ }
+
+ return flags;
+}
+
+int
+nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
+{
+ if (!push->channel)
+ return pushbuf_submit(push, chan);
+ pushbuf_flush(push);
+ return pushbuf_validate(push, false);
+}
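
As a usage sketch of the pushbuf API above (not part of the imported sources): it assumes `client` and a FIFO-class `chan` were created beforehand via nouveau_client_new()/nouveau_object_new(), and the method header and data words are purely illustrative — real users emit them through per-generation macros.

#include <stdbool.h>
#include <nouveau.h>	/* libdrm_nouveau public header */

static int submit_example(struct nouveau_client *client,
			  struct nouveau_object *chan)
{
	struct nouveau_pushbuf *push;
	int ret;

	/* two 64 KiB buffers, flushed to the kernel immediately */
	ret = nouveau_pushbuf_new(client, chan, 2, 65536, true, &push);
	if (ret)
		return ret;

	/* reserve dwords up front; this may flush and switch buffers */
	ret = nouveau_pushbuf_space(push, 2, 0, 0);
	if (ret == 0) {
		*push->cur++ = 0x20010000;	/* illustrative method header */
		*push->cur++ = 0xdeadbeef;	/* illustrative data word */
		ret = nouveau_pushbuf_kick(push, chan);
	}

	nouveau_pushbuf_del(&push);
	return ret;
}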
diff --git a/chromium/third_party/libdrm/src/omap/Makefile.am b/chromium/third_party/libdrm/src/omap/Makefile.am
new file mode 100644
index 00000000000..599bb9ded50
--- /dev/null
+++ b/chromium/third_party/libdrm/src/omap/Makefile.am
@@ -0,0 +1,24 @@
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ $(PTHREADSTUBS_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_omap_la_LTLIBRARIES = libdrm_omap.la
+libdrm_omap_ladir = $(libdir)
+libdrm_omap_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libdrm_omap_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
+
+libdrm_omap_la_SOURCES = omap_drm.c
+
+libdrm_omapcommonincludedir = ${includedir}/omap
+libdrm_omapcommoninclude_HEADERS = omap_drm.h
+
+libdrm_omapincludedir = ${includedir}/libdrm
+libdrm_omapinclude_HEADERS = omap_drmif.h
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm_omap.pc
+
+TESTS = omap-symbol-check
+EXTRA_DIST = $(TESTS)
diff --git a/chromium/third_party/libdrm/src/omap/libdrm_omap.pc.in b/chromium/third_party/libdrm/src/omap/libdrm_omap.pc.in
new file mode 100644
index 00000000000..024533bd871
--- /dev/null
+++ b/chromium/third_party/libdrm/src/omap/libdrm_omap.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_omap
+Description: Userspace interface to omap kernel DRM services
+Version: 0.6
+Libs: -L${libdir} -ldrm_omap
+Cflags: -I${includedir} -I${includedir}/libdrm -I${includedir}/omap
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/omap/omap-symbol-check b/chromium/third_party/libdrm/src/omap/omap-symbol-check
new file mode 100755
index 00000000000..759c84bd3d9
--- /dev/null
+++ b/chromium/third_party/libdrm/src/omap/omap-symbol-check
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# The following symbols (past the first five) are taken from the public headers.
+# A list of the latter should be available in Makefile.am (libdrm_omap*HEADERS).
+
+FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_omap.so} | awk '{print $3}'| while read func; do
+( grep -q "^$func$" || echo $func ) <<EOF
+__bss_start
+_edata
+_end
+_fini
+_init
+omap_bo_cpu_fini
+omap_bo_cpu_prep
+omap_bo_del
+omap_bo_dmabuf
+omap_bo_from_dmabuf
+omap_bo_from_name
+omap_bo_get_name
+omap_bo_handle
+omap_bo_map
+omap_bo_new
+omap_bo_new_tiled
+omap_bo_ref
+omap_bo_size
+omap_device_del
+omap_device_new
+omap_device_ref
+omap_get_param
+omap_set_param
+EOF
+done)
+
+test ! -n "$FUNCS" || echo $FUNCS
+test ! -n "$FUNCS"
diff --git a/chromium/third_party/libdrm/src/omap/omap_drm.c b/chromium/third_party/libdrm/src/omap/omap_drm.c
new file mode 100644
index 00000000000..08ba64eb694
--- /dev/null
+++ b/chromium/third_party/libdrm/src/omap/omap_drm.c
@@ -0,0 +1,486 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2011 Texas Instruments, Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <rob@ti.com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#include <libdrm_macros.h>
+#include <xf86drm.h>
+#include <xf86atomic.h>
+
+#include "omap_drm.h"
+#include "omap_drmif.h"
+
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define PAGE_SIZE 4096
+
+static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
+static void * dev_table;
+
+struct omap_device {
+ int fd;
+ atomic_t refcnt;
+
+	/* The handle_table is used to track GEM bo handles associated
+	 * with this fd. This is needed, in particular, when importing
+	 * dmabufs, because we don't want multiple 'struct omap_bo's
+	 * floating around with the same handle. Otherwise, when the
+	 * first one is omap_bo_del()'d the handle is no longer
+	 * valid, and the remaining 'struct omap_bo's are left pointing
+	 * to an invalid handle (and possibly a GEM bo that has already
+	 * been freed).
+	 */
+ void *handle_table;
+};
+
+/* a GEM buffer object allocated from the DRM device */
+struct omap_bo {
+ struct omap_device *dev;
+ void *map; /* userspace mmap'ing (if there is one) */
+ uint32_t size;
+ uint32_t handle;
+ uint32_t name; /* flink global handle (DRI2 name) */
+ uint64_t offset; /* offset to mmap() */
+ int fd; /* dmabuf handle */
+ atomic_t refcnt;
+};
+
+static struct omap_device * omap_device_new_impl(int fd)
+{
+ struct omap_device *dev = calloc(sizeof(*dev), 1);
+ if (!dev)
+ return NULL;
+ dev->fd = fd;
+ atomic_set(&dev->refcnt, 1);
+ dev->handle_table = drmHashCreate();
+ return dev;
+}
+
+struct omap_device * omap_device_new(int fd)
+{
+ struct omap_device *dev = NULL;
+
+ pthread_mutex_lock(&table_lock);
+
+ if (!dev_table)
+ dev_table = drmHashCreate();
+
+ if (drmHashLookup(dev_table, fd, (void **)&dev)) {
+ /* not found, create new device */
+ dev = omap_device_new_impl(fd);
+ drmHashInsert(dev_table, fd, dev);
+ } else {
+ /* found, just incr refcnt */
+ dev = omap_device_ref(dev);
+ }
+
+ pthread_mutex_unlock(&table_lock);
+
+ return dev;
+}
+
+struct omap_device * omap_device_ref(struct omap_device *dev)
+{
+ atomic_inc(&dev->refcnt);
+ return dev;
+}
+
+void omap_device_del(struct omap_device *dev)
+{
+ if (!atomic_dec_and_test(&dev->refcnt))
+ return;
+ pthread_mutex_lock(&table_lock);
+ drmHashDestroy(dev->handle_table);
+ drmHashDelete(dev_table, dev->fd);
+ pthread_mutex_unlock(&table_lock);
+ free(dev);
+}
+
+int
+omap_get_param(struct omap_device *dev, uint64_t param, uint64_t *value)
+{
+ struct drm_omap_param req = {
+ .param = param,
+ };
+ int ret;
+
+ ret = drmCommandWriteRead(dev->fd, DRM_OMAP_GET_PARAM, &req, sizeof(req));
+ if (ret) {
+ return ret;
+ }
+
+ *value = req.value;
+
+ return 0;
+}
+
+int
+omap_set_param(struct omap_device *dev, uint64_t param, uint64_t value)
+{
+ struct drm_omap_param req = {
+ .param = param,
+ .value = value,
+ };
+ return drmCommandWrite(dev->fd, DRM_OMAP_SET_PARAM, &req, sizeof(req));
+}
+
+/* lookup a buffer from its handle; call with table_lock held: */
+static struct omap_bo * lookup_bo(struct omap_device *dev,
+ uint32_t handle)
+{
+ struct omap_bo *bo = NULL;
+ if (!drmHashLookup(dev->handle_table, handle, (void **)&bo)) {
+ /* found, incr refcnt and return: */
+ bo = omap_bo_ref(bo);
+ }
+ return bo;
+}
+
+/* allocate a new buffer object, call w/ table_lock held */
+static struct omap_bo * bo_from_handle(struct omap_device *dev,
+ uint32_t handle)
+{
+ struct omap_bo *bo = calloc(sizeof(*bo), 1);
+ if (!bo) {
+ struct drm_gem_close req = {
+ .handle = handle,
+ };
+ drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ return NULL;
+ }
+ bo->dev = omap_device_ref(dev);
+ bo->handle = handle;
+ bo->fd = -1;
+ atomic_set(&bo->refcnt, 1);
+ /* add ourselves to the handle table: */
+ drmHashInsert(dev->handle_table, handle, bo);
+ return bo;
+}
+
+/* allocate a new buffer object */
+static struct omap_bo * omap_bo_new_impl(struct omap_device *dev,
+ union omap_gem_size size, uint32_t flags)
+{
+ struct omap_bo *bo = NULL;
+ struct drm_omap_gem_new req = {
+ .size = size,
+ .flags = flags,
+ };
+
+ if (size.bytes == 0) {
+ goto fail;
+ }
+
+ if (drmCommandWriteRead(dev->fd, DRM_OMAP_GEM_NEW, &req, sizeof(req))) {
+ goto fail;
+ }
+
+ pthread_mutex_lock(&table_lock);
+ bo = bo_from_handle(dev, req.handle);
+	pthread_mutex_unlock(&table_lock);
+
+	/* bo_from_handle() closes the GEM handle itself on failure */
+	if (!bo) {
+		goto fail;
+	}
+
+	if (flags & OMAP_BO_TILED) {
+ bo->size = round_up(size.tiled.width, PAGE_SIZE) * size.tiled.height;
+ } else {
+ bo->size = size.bytes;
+ }
+
+ return bo;
+
+fail:
+ free(bo);
+ return NULL;
+}
+
+
+/* allocate a new (un-tiled) buffer object */
+struct omap_bo *
+omap_bo_new(struct omap_device *dev, uint32_t size, uint32_t flags)
+{
+ union omap_gem_size gsize = {
+ .bytes = size,
+ };
+ if (flags & OMAP_BO_TILED) {
+ return NULL;
+ }
+ return omap_bo_new_impl(dev, gsize, flags);
+}
+
+/* allocate a new buffer object */
+struct omap_bo *
+omap_bo_new_tiled(struct omap_device *dev, uint32_t width,
+ uint32_t height, uint32_t flags)
+{
+ union omap_gem_size gsize = {
+ .tiled = {
+ .width = width,
+ .height = height,
+ },
+ };
+ if (!(flags & OMAP_BO_TILED)) {
+ return NULL;
+ }
+ return omap_bo_new_impl(dev, gsize, flags);
+}
+
+struct omap_bo *omap_bo_ref(struct omap_bo *bo)
+{
+ atomic_inc(&bo->refcnt);
+ return bo;
+}
+
+/* get buffer info */
+static int get_buffer_info(struct omap_bo *bo)
+{
+ struct drm_omap_gem_info req = {
+ .handle = bo->handle,
+ };
+ int ret = drmCommandWriteRead(bo->dev->fd, DRM_OMAP_GEM_INFO,
+ &req, sizeof(req));
+ if (ret) {
+ return ret;
+ }
+
+ /* really all we need for now is mmap offset */
+ bo->offset = req.offset;
+ bo->size = req.size;
+
+ return 0;
+}
+
+/* import a buffer object from DRI2 name */
+struct omap_bo *
+omap_bo_from_name(struct omap_device *dev, uint32_t name)
+{
+ struct omap_bo *bo = NULL;
+ struct drm_gem_open req = {
+ .name = name,
+ };
+
+ pthread_mutex_lock(&table_lock);
+
+ if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
+ goto fail;
+ }
+
+	bo = lookup_bo(dev, req.handle);
+	if (!bo) {
+		bo = bo_from_handle(dev, req.handle);
+		if (!bo) {
+			goto fail;
+		}
+		bo->name = name;
+	}
+
+ pthread_mutex_unlock(&table_lock);
+
+ return bo;
+
+fail:
+ pthread_mutex_unlock(&table_lock);
+ free(bo);
+ return NULL;
+}
+
+/* import a buffer from a dmabuf fd; this does not take ownership of
+ * the fd, so the caller should close() the fd when otherwise done
+ * with it (even if it is still using the 'struct omap_bo *')
+ */
+struct omap_bo *
+omap_bo_from_dmabuf(struct omap_device *dev, int fd)
+{
+ struct omap_bo *bo = NULL;
+ struct drm_prime_handle req = {
+ .fd = fd,
+ };
+ int ret;
+
+ pthread_mutex_lock(&table_lock);
+
+ ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
+ if (ret) {
+ goto fail;
+ }
+
+ bo = lookup_bo(dev, req.handle);
+ if (!bo) {
+ bo = bo_from_handle(dev, req.handle);
+ }
+
+ pthread_mutex_unlock(&table_lock);
+
+ return bo;
+
+fail:
+ pthread_mutex_unlock(&table_lock);
+ free(bo);
+ return NULL;
+}
+
+/* destroy a buffer object */
+void omap_bo_del(struct omap_bo *bo)
+{
+ if (!bo) {
+ return;
+ }
+
+ if (!atomic_dec_and_test(&bo->refcnt))
+ return;
+
+ if (bo->map) {
+ munmap(bo->map, bo->size);
+ }
+
+ if (bo->fd >= 0) {
+ close(bo->fd);
+ }
+
+ if (bo->handle) {
+ struct drm_gem_close req = {
+ .handle = bo->handle,
+ };
+ pthread_mutex_lock(&table_lock);
+ drmHashDelete(bo->dev->handle_table, bo->handle);
+ drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ pthread_mutex_unlock(&table_lock);
+ }
+
+ omap_device_del(bo->dev);
+
+ free(bo);
+}
+
+/* get the global flink/DRI2 buffer name */
+int omap_bo_get_name(struct omap_bo *bo, uint32_t *name)
+{
+ if (!bo->name) {
+ struct drm_gem_flink req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
+ if (ret) {
+ return ret;
+ }
+
+ bo->name = req.name;
+ }
+
+ *name = bo->name;
+
+ return 0;
+}
+
+uint32_t omap_bo_handle(struct omap_bo *bo)
+{
+ return bo->handle;
+}
+
+/* the caller owns the dmabuf fd that is returned and is responsible
+ * for close()ing it when done
+ */
+int omap_bo_dmabuf(struct omap_bo *bo)
+{
+ if (bo->fd < 0) {
+ struct drm_prime_handle req = {
+ .handle = bo->handle,
+ .flags = DRM_CLOEXEC,
+ };
+ int ret;
+
+ ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
+ if (ret) {
+ return ret;
+ }
+
+ bo->fd = req.fd;
+ }
+ return dup(bo->fd);
+}
+
+uint32_t omap_bo_size(struct omap_bo *bo)
+{
+ if (!bo->size) {
+ get_buffer_info(bo);
+ }
+ return bo->size;
+}
+
+void *omap_bo_map(struct omap_bo *bo)
+{
+ if (!bo->map) {
+ if (!bo->offset) {
+ get_buffer_info(bo);
+ }
+
+ bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bo->dev->fd, bo->offset);
+ if (bo->map == MAP_FAILED) {
+ bo->map = NULL;
+ }
+ }
+ return bo->map;
+}
+
+int omap_bo_cpu_prep(struct omap_bo *bo, enum omap_gem_op op)
+{
+ struct drm_omap_gem_cpu_prep req = {
+ .handle = bo->handle,
+ .op = op,
+ };
+ return drmCommandWrite(bo->dev->fd,
+ DRM_OMAP_GEM_CPU_PREP, &req, sizeof(req));
+}
+
+int omap_bo_cpu_fini(struct omap_bo *bo, enum omap_gem_op op)
+{
+ struct drm_omap_gem_cpu_fini req = {
+ .handle = bo->handle,
+ .op = op,
+ .nregions = 0,
+ };
+ return drmCommandWrite(bo->dev->fd,
+ DRM_OMAP_GEM_CPU_FINI, &req, sizeof(req));
+}
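
The fd-ownership rules in the two comments above are easy to get wrong, so here is a hedged round-trip sketch (not part of the imported sources): omap_bo_dmabuf() returns a dup()'d fd that the caller owns, omap_bo_from_dmabuf() leaves its fd with the caller, and the per-device handle table ensures the import resolves to the existing bo rather than a duplicate.

#include <unistd.h>
#include <omap_drmif.h>

static int share_example(struct omap_device *dev, struct omap_bo *bo)
{
	struct omap_bo *imported;
	int fd, ret = 0;

	fd = omap_bo_dmabuf(bo);	/* dup()'d fd, owned by us */
	if (fd < 0)
		return fd;

	imported = omap_bo_from_dmabuf(dev, fd);	/* fd stays ours */
	if (imported)
		omap_bo_del(imported);	/* drops the extra reference */
	else
		ret = -1;

	close(fd);	/* we must close the fd in either case */
	return ret;
}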
diff --git a/chromium/third_party/libdrm/src/omap/omap_drm.h b/chromium/third_party/libdrm/src/omap/omap_drm.h
new file mode 100644
index 00000000000..9c6c0e4e7a2
--- /dev/null
+++ b/chromium/third_party/libdrm/src/omap/omap_drm.h
@@ -0,0 +1,135 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2011 Texas Instruments, Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <rob@ti.com>
+ */
+
+#ifndef __OMAP_DRM_H__
+#define __OMAP_DRM_H__
+
+#include <stdint.h>
+#include <drm.h>
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+#define OMAP_PARAM_CHIPSET_ID	1	/* e.g. 0x3430, 0x4430, etc */
+
+struct drm_omap_param {
+ uint64_t param; /* in */
+ uint64_t value; /* in (set_param), out (get_param) */
+};
+
+struct drm_omap_get_base {
+ char plugin_name[64]; /* in */
+ uint32_t ioctl_base; /* out */
+ uint32_t __pad;
+};
+
+#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
+#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
+#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
+
+/* cache modes */
+#define OMAP_BO_CACHED 0x00000000 /* default */
+#define OMAP_BO_WC 0x00000002 /* write-combine */
+#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
+
+/* tiled modes */
+#define OMAP_BO_TILED_8 0x00000100
+#define OMAP_BO_TILED_16 0x00000200
+#define OMAP_BO_TILED_32 0x00000300
+#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
+
+union omap_gem_size {
+ uint32_t bytes; /* (for non-tiled formats) */
+ struct {
+ uint16_t width;
+ uint16_t height;
+ } tiled; /* (for tiled formats) */
+};
+
+struct drm_omap_gem_new {
+ union omap_gem_size size; /* in */
+ uint32_t flags; /* in */
+ uint32_t handle; /* out */
+ uint32_t __pad;
+};
+
+/* mask of operations: */
+enum omap_gem_op {
+ OMAP_GEM_READ = 0x01,
+ OMAP_GEM_WRITE = 0x02,
+};
+
+struct drm_omap_gem_cpu_prep {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t op; /* mask of omap_gem_op (in) */
+};
+
+struct drm_omap_gem_cpu_fini {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t op; /* mask of omap_gem_op (in) */
+	/* TODO: maybe pass down info here about which regions are touched
+	 * by SW, so we can be smarter about cache ops? For now this is a
+	 * placeholder: set it to zero and we just do a full buffer flush.
+	 */
+ uint32_t nregions;
+ uint32_t __pad;
+};
+
+struct drm_omap_gem_info {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t pad;
+ uint64_t offset; /* mmap offset (out) */
+	/* note: in the case of tiled buffers, the user-virtual size can
+	 * differ from the physical size (i.e. how many pages are needed
+	 * to back the object), which is what DRM_IOCTL_GEM_OPEN returns.
+	 * The size here is the one that should be used if you want to
+	 * mmap() the buffer:
+	 */
+ uint32_t size; /* virtual size for mmap'ing (out) */
+ uint32_t __pad;
+};
+
+#define DRM_OMAP_GET_PARAM 0x00
+#define DRM_OMAP_SET_PARAM 0x01
+#define DRM_OMAP_GET_BASE 0x02
+#define DRM_OMAP_GEM_NEW 0x03
+#define DRM_OMAP_GEM_CPU_PREP 0x04
+#define DRM_OMAP_GEM_CPU_FINI 0x05
+#define DRM_OMAP_GEM_INFO 0x06
+#define DRM_OMAP_NUM_IOCTLS 0x07
+
+#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
+#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
+#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
+#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
+#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
+#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
+#define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
+
+#endif /* __OMAP_DRM_H__ */
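
For illustration, a hedged sketch of driving DRM_OMAP_GEM_NEW directly through libdrm's generic command helper — essentially what omap_bo_new() in omap_drm.c wraps; the 4 KiB size and OMAP_BO_WC flag here are arbitrary choices, not part of the imported sources.

#include <xf86drm.h>
#include "omap_drm.h"

/* allocate a 4 KiB write-combined buffer via the raw ioctl */
static int raw_gem_new(int fd, uint32_t *handle)
{
	struct drm_omap_gem_new req = {
		.size = { .bytes = 4096 },
		.flags = OMAP_BO_WC,
	};
	int ret = drmCommandWriteRead(fd, DRM_OMAP_GEM_NEW,
				      &req, sizeof(req));
	if (ret == 0)
		*handle = req.handle;
	return ret;
}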
diff --git a/chromium/third_party/libdrm/src/omap/omap_drmif.h b/chromium/third_party/libdrm/src/omap/omap_drmif.h
new file mode 100644
index 00000000000..e62d127820d
--- /dev/null
+++ b/chromium/third_party/libdrm/src/omap/omap_drmif.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011 Texas Instruments, Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <rob@ti.com>
+ */
+
+#ifndef OMAP_DRMIF_H_
+#define OMAP_DRMIF_H_
+
+#include <xf86drm.h>
+#include <stdint.h>
+#include <omap_drm.h>
+
+struct omap_bo;
+struct omap_device;
+
+/* device related functions:
+ */
+
+struct omap_device * omap_device_new(int fd);
+struct omap_device * omap_device_ref(struct omap_device *dev);
+void omap_device_del(struct omap_device *dev);
+int omap_get_param(struct omap_device *dev, uint64_t param, uint64_t *value);
+int omap_set_param(struct omap_device *dev, uint64_t param, uint64_t value);
+
+/* buffer-object related functions:
+ */
+
+struct omap_bo * omap_bo_new(struct omap_device *dev,
+ uint32_t size, uint32_t flags);
+struct omap_bo * omap_bo_new_tiled(struct omap_device *dev,
+ uint32_t width, uint32_t height, uint32_t flags);
+struct omap_bo * omap_bo_ref(struct omap_bo *bo);
+struct omap_bo * omap_bo_from_name(struct omap_device *dev, uint32_t name);
+struct omap_bo * omap_bo_from_dmabuf(struct omap_device *dev, int fd);
+void omap_bo_del(struct omap_bo *bo);
+int omap_bo_get_name(struct omap_bo *bo, uint32_t *name);
+uint32_t omap_bo_handle(struct omap_bo *bo);
+int omap_bo_dmabuf(struct omap_bo *bo);
+uint32_t omap_bo_size(struct omap_bo *bo);
+void * omap_bo_map(struct omap_bo *bo);
+int omap_bo_cpu_prep(struct omap_bo *bo, enum omap_gem_op op);
+int omap_bo_cpu_fini(struct omap_bo *bo, enum omap_gem_op op);
+
+#endif /* OMAP_DRMIF_H_ */
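
A minimal end-to-end sketch of the interface declared above (not part of the imported sources), assuming /dev/dri/card0 is an omapdrm node; error handling is abbreviated.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <omap_drmif.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed omapdrm node */
	struct omap_device *dev;
	struct omap_bo *bo;
	uint64_t chipset = 0;
	char *p;

	if (fd < 0)
		return 1;
	dev = omap_device_new(fd);
	if (!dev) {
		close(fd);
		return 1;
	}

	if (omap_get_param(dev, OMAP_PARAM_CHIPSET_ID, &chipset) == 0)
		printf("chipset 0x%llx\n", (unsigned long long)chipset);

	bo = omap_bo_new(dev, 4096, OMAP_BO_WC);
	p = bo ? omap_bo_map(bo) : NULL;
	if (p) {
		/* bracket CPU access so the kernel can sync/flush */
		omap_bo_cpu_prep(bo, OMAP_GEM_WRITE);
		p[0] = 0x42;
		omap_bo_cpu_fini(bo, OMAP_GEM_WRITE);
	}

	omap_bo_del(bo);
	omap_device_del(dev);
	close(fd);
	return 0;
}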
diff --git a/chromium/third_party/libdrm/src/radeon/Android.mk b/chromium/third_party/libdrm/src/radeon/Android.mk
new file mode 100644
index 00000000000..e70444354de
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/Android.mk
@@ -0,0 +1,18 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Import variables LIBDRM_RADEON_FILES, LIBDRM_RADEON_H_FILES
+include $(LOCAL_PATH)/Makefile.sources
+
+LOCAL_MODULE := libdrm_radeon
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES := $(patsubst %.h, , $(LIBDRM_RADEON_FILES))
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+
+LOCAL_CFLAGS := \
+ -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/chromium/third_party/libdrm/src/radeon/Makefile.am b/chromium/third_party/libdrm/src/radeon/Makefile.am
new file mode 100644
index 00000000000..e241531488c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/Makefile.am
@@ -0,0 +1,47 @@
+# Copyright © 2008 Jérôme Glisse
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+# Jérôme Glisse <glisse@freedesktop.org>
+
+include Makefile.sources
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ $(PTHREADSTUBS_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_radeon_la_LTLIBRARIES = libdrm_radeon.la
+libdrm_radeon_ladir = $(libdir)
+libdrm_radeon_la_LDFLAGS = -version-number 1:0:1 -no-undefined
+libdrm_radeon_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
+
+libdrm_radeon_la_SOURCES = $(LIBDRM_RADEON_FILES)
+
+libdrm_radeonincludedir = ${includedir}/libdrm
+libdrm_radeoninclude_HEADERS = $(LIBDRM_RADEON_H_FILES)
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm_radeon.pc
+
+TESTS = radeon-symbol-check
+EXTRA_DIST = $(LIBDRM_RADEON_BOF_FILES) $(TESTS)
diff --git a/chromium/third_party/libdrm/src/radeon/Makefile.sources b/chromium/third_party/libdrm/src/radeon/Makefile.sources
new file mode 100644
index 00000000000..1cf482a4edc
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/Makefile.sources
@@ -0,0 +1,21 @@
+LIBDRM_RADEON_FILES := \
+ radeon_bo_gem.c \
+ radeon_cs_gem.c \
+ radeon_cs_space.c \
+ radeon_bo.c \
+ radeon_cs.c \
+ radeon_surface.c
+
+LIBDRM_RADEON_H_FILES := \
+ radeon_bo.h \
+ radeon_cs.h \
+ radeon_surface.h \
+ radeon_bo_gem.h \
+ radeon_cs_gem.h \
+ radeon_bo_int.h \
+ radeon_cs_int.h \
+ r600_pci_ids.h
+
+LIBDRM_RADEON_BOF_FILES := \
+ bof.c \
+ bof.h
diff --git a/chromium/third_party/libdrm/src/radeon/bof.c b/chromium/third_party/libdrm/src/radeon/bof.c
new file mode 100644
index 00000000000..0598cc6bc0f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/bof.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jerome Glisse
+ */
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include "bof.h"
+
+/*
+ * helpers
+ */
+static int bof_entry_grow(bof_t *bof)
+{
+ bof_t **array;
+
+ if (bof->array_size < bof->nentry)
+ return 0;
+ array = realloc(bof->array, (bof->nentry + 16) * sizeof(void*));
+ if (array == NULL)
+ return -ENOMEM;
+ bof->array = array;
+ bof->nentry += 16;
+ return 0;
+}
+
+/*
+ * object
+ */
+bof_t *bof_object(void)
+{
+ bof_t *object;
+
+ object = calloc(1, sizeof(bof_t));
+ if (object == NULL)
+ return NULL;
+ object->refcount = 1;
+ object->type = BOF_TYPE_OBJECT;
+ object->size = 12;
+ return object;
+}
+
+bof_t *bof_object_get(bof_t *object, const char *keyname)
+{
+ unsigned i;
+
+ for (i = 0; i < object->array_size; i += 2) {
+ if (!strcmp(object->array[i]->value, keyname)) {
+ return object->array[i + 1];
+ }
+ }
+ return NULL;
+}
+
+int bof_object_set(bof_t *object, const char *keyname, bof_t *value)
+{
+ bof_t *key;
+ int r;
+
+ if (object->type != BOF_TYPE_OBJECT)
+ return -EINVAL;
+ r = bof_entry_grow(object);
+ if (r)
+ return r;
+ key = bof_string(keyname);
+ if (key == NULL)
+ return -ENOMEM;
+ object->array[object->array_size++] = key;
+ object->array[object->array_size++] = value;
+ object->size += value->size;
+ object->size += key->size;
+ bof_incref(value);
+ return 0;
+}
+
+/*
+ * array
+ */
+bof_t *bof_array(void)
+{
+ bof_t *array = bof_object();
+
+ if (array == NULL)
+ return NULL;
+ array->type = BOF_TYPE_ARRAY;
+ array->size = 12;
+ return array;
+}
+
+int bof_array_append(bof_t *array, bof_t *value)
+{
+ int r;
+ if (array->type != BOF_TYPE_ARRAY)
+ return -EINVAL;
+ r = bof_entry_grow(array);
+ if (r)
+ return r;
+ array->array[array->array_size++] = value;
+ array->size += value->size;
+ bof_incref(value);
+ return 0;
+}
+
+bof_t *bof_array_get(bof_t *bof, unsigned i)
+{
+ if (!bof_is_array(bof) || i >= bof->array_size)
+ return NULL;
+ return bof->array[i];
+}
+
+unsigned bof_array_size(bof_t *bof)
+{
+ if (!bof_is_array(bof))
+ return 0;
+ return bof->array_size;
+}
+
+/*
+ * blob
+ */
+bof_t *bof_blob(unsigned size, void *value)
+{
+ bof_t *blob = bof_object();
+
+ if (blob == NULL)
+ return NULL;
+ blob->type = BOF_TYPE_BLOB;
+ blob->value = calloc(1, size);
+ if (blob->value == NULL) {
+ bof_decref(blob);
+ return NULL;
+ }
+ blob->size = size;
+ memcpy(blob->value, value, size);
+ blob->size += 12;
+ return blob;
+}
+
+unsigned bof_blob_size(bof_t *bof)
+{
+ if (!bof_is_blob(bof))
+ return 0;
+ return bof->size - 12;
+}
+
+void *bof_blob_value(bof_t *bof)
+{
+ if (!bof_is_blob(bof))
+ return NULL;
+ return bof->value;
+}
+
+/*
+ * string
+ */
+bof_t *bof_string(const char *value)
+{
+ bof_t *string = bof_object();
+
+ if (string == NULL)
+ return NULL;
+ string->type = BOF_TYPE_STRING;
+ string->size = strlen(value) + 1;
+ string->value = calloc(1, string->size);
+ if (string->value == NULL) {
+ bof_decref(string);
+ return NULL;
+ }
+ strcpy(string->value, value);
+ string->size += 12;
+ return string;
+}
+
+/*
+ * int32
+ */
+bof_t *bof_int32(int32_t value)
+{
+ bof_t *int32 = bof_object();
+
+ if (int32 == NULL)
+ return NULL;
+ int32->type = BOF_TYPE_INT32;
+ int32->size = 4;
+ int32->value = calloc(1, int32->size);
+ if (int32->value == NULL) {
+ bof_decref(int32);
+ return NULL;
+ }
+ memcpy(int32->value, &value, 4);
+ int32->size += 12;
+ return int32;
+}
+
+int32_t bof_int32_value(bof_t *bof)
+{
+ return *((uint32_t*)bof->value);
+}
+
+/*
+ * common
+ */
+static void bof_indent(int level)
+{
+ int i;
+
+ for (i = 0; i < level; i++)
+ fprintf(stderr, " ");
+}
+
+static void bof_print_bof(bof_t *bof, int level, int entry)
+{
+ bof_indent(level);
+ if (bof == NULL) {
+ fprintf(stderr, "--NULL-- for entry %d\n", entry);
+ return;
+ }
+ switch (bof->type) {
+ case BOF_TYPE_STRING:
+ fprintf(stderr, "%p string [%s %d]\n", bof, (char*)bof->value, bof->size);
+ break;
+ case BOF_TYPE_INT32:
+ fprintf(stderr, "%p int32 [%d %d]\n", bof, *(int*)bof->value, bof->size);
+ break;
+ case BOF_TYPE_BLOB:
+ fprintf(stderr, "%p blob [%d]\n", bof, bof->size);
+ break;
+ case BOF_TYPE_NULL:
+ fprintf(stderr, "%p null [%d]\n", bof, bof->size);
+ break;
+ case BOF_TYPE_OBJECT:
+ fprintf(stderr, "%p object [%d %d]\n", bof, bof->array_size / 2, bof->size);
+ break;
+ case BOF_TYPE_ARRAY:
+ fprintf(stderr, "%p array [%d %d]\n", bof, bof->array_size, bof->size);
+ break;
+ default:
+ fprintf(stderr, "%p unknown [%d]\n", bof, bof->type);
+ return;
+ }
+}
+
+static void bof_print_rec(bof_t *bof, int level, int entry)
+{
+ unsigned i;
+
+ bof_print_bof(bof, level, entry);
+ for (i = 0; i < bof->array_size; i++) {
+ bof_print_rec(bof->array[i], level + 2, i);
+ }
+}
+
+void bof_print(bof_t *bof)
+{
+ bof_print_rec(bof, 0, 0);
+}
+
+static int bof_read(bof_t *root, FILE *file, long end, int level)
+{
+ bof_t *bof = NULL;
+ int r;
+
+ if (ftell(file) >= end) {
+ return 0;
+ }
+ r = bof_entry_grow(root);
+ if (r)
+ return r;
+ bof = bof_object();
+ if (bof == NULL)
+ return -ENOMEM;
+ bof->offset = ftell(file);
+ r = fread(&bof->type, 4, 1, file);
+ if (r != 1)
+ goto out_err;
+ r = fread(&bof->size, 4, 1, file);
+ if (r != 1)
+ goto out_err;
+ r = fread(&bof->array_size, 4, 1, file);
+ if (r != 1)
+ goto out_err;
+ switch (bof->type) {
+ case BOF_TYPE_STRING:
+ case BOF_TYPE_INT32:
+ case BOF_TYPE_BLOB:
+ bof->value = calloc(1, bof->size - 12);
+ if (bof->value == NULL) {
+ goto out_err;
+ }
+ r = fread(bof->value, bof->size - 12, 1, file);
+ if (r != 1) {
+ fprintf(stderr, "error reading %d\n", bof->size - 12);
+ goto out_err;
+ }
+ break;
+ case BOF_TYPE_NULL:
+ return 0;
+ case BOF_TYPE_OBJECT:
+ case BOF_TYPE_ARRAY:
+ r = bof_read(bof, file, bof->offset + bof->size, level + 2);
+ if (r)
+ goto out_err;
+ break;
+ default:
+ fprintf(stderr, "invalid type %d\n", bof->type);
+ goto out_err;
+ }
+ root->array[root->centry++] = bof;
+ return bof_read(root, file, end, level);
+out_err:
+ bof_decref(bof);
+ return -EINVAL;
+}
+
+bof_t *bof_load_file(const char *filename)
+{
+ bof_t *root = bof_object();
+ int r;
+
+ if (root == NULL) {
+ fprintf(stderr, "%s failed to create root object\n", __func__);
+ return NULL;
+ }
+ root->file = fopen(filename, "r");
+ if (root->file == NULL)
+ goto out_err;
+ r = fseek(root->file, 0L, SEEK_SET);
+ if (r) {
+ fprintf(stderr, "%s failed to seek into file %s\n", __func__, filename);
+ goto out_err;
+ }
+ root->offset = ftell(root->file);
+ r = fread(&root->type, 4, 1, root->file);
+ if (r != 1)
+ goto out_err;
+ r = fread(&root->size, 4, 1, root->file);
+ if (r != 1)
+ goto out_err;
+ r = fread(&root->array_size, 4, 1, root->file);
+ if (r != 1)
+ goto out_err;
+ r = bof_read(root, root->file, root->offset + root->size, 2);
+ if (r)
+ goto out_err;
+ return root;
+out_err:
+ bof_decref(root);
+ return NULL;
+}
+
+void bof_incref(bof_t *bof)
+{
+ bof->refcount++;
+}
+
+void bof_decref(bof_t *bof)
+{
+ unsigned i;
+
+ if (bof == NULL)
+ return;
+ if (--bof->refcount > 0)
+ return;
+ for (i = 0; i < bof->array_size; i++) {
+ bof_decref(bof->array[i]);
+ bof->array[i] = NULL;
+ }
+ bof->array_size = 0;
+ if (bof->file) {
+ fclose(bof->file);
+ bof->file = NULL;
+ }
+ free(bof->array);
+ free(bof->value);
+ free(bof);
+}
+
+static int bof_file_write(bof_t *bof, FILE *file)
+{
+ unsigned i;
+ int r;
+
+ r = fwrite(&bof->type, 4, 1, file);
+ if (r != 1)
+ return -EINVAL;
+ r = fwrite(&bof->size, 4, 1, file);
+ if (r != 1)
+ return -EINVAL;
+ r = fwrite(&bof->array_size, 4, 1, file);
+ if (r != 1)
+ return -EINVAL;
+ switch (bof->type) {
+ case BOF_TYPE_NULL:
+ if (bof->size)
+ return -EINVAL;
+ break;
+ case BOF_TYPE_STRING:
+ case BOF_TYPE_INT32:
+ case BOF_TYPE_BLOB:
+ r = fwrite(bof->value, bof->size - 12, 1, file);
+ if (r != 1)
+ return -EINVAL;
+ break;
+ case BOF_TYPE_OBJECT:
+ case BOF_TYPE_ARRAY:
+ for (i = 0; i < bof->array_size; i++) {
+ r = bof_file_write(bof->array[i], file);
+ if (r)
+ return r;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bof_dump_file(bof_t *bof, const char *filename)
+{
+ unsigned i;
+ int r = 0;
+
+ if (bof->file) {
+ fclose(bof->file);
+ bof->file = NULL;
+ }
+ bof->file = fopen(filename, "w");
+ if (bof->file == NULL) {
+ fprintf(stderr, "%s failed to open file %s\n", __func__, filename);
+ r = -EINVAL;
+ goto out_err;
+ }
+ r = fseek(bof->file, 0L, SEEK_SET);
+ if (r) {
+ fprintf(stderr, "%s failed to seek into file %s\n", __func__, filename);
+ goto out_err;
+ }
+ r = fwrite(&bof->type, 4, 1, bof->file);
+ if (r != 1)
+ goto out_err;
+ r = fwrite(&bof->size, 4, 1, bof->file);
+ if (r != 1)
+ goto out_err;
+ r = fwrite(&bof->array_size, 4, 1, bof->file);
+ if (r != 1)
+ goto out_err;
+ for (i = 0; i < bof->array_size; i++) {
+ r = bof_file_write(bof->array[i], bof->file);
+ if (r)
+ return r;
+ }
+out_err:
+ fclose(bof->file);
+ bof->file = NULL;
+ return r;
+}
diff --git a/chromium/third_party/libdrm/src/radeon/bof.h b/chromium/third_party/libdrm/src/radeon/bof.h
new file mode 100644
index 00000000000..014affb74f1
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/bof.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jerome Glisse
+ */
+#ifndef BOF_H
+#define BOF_H
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define BOF_TYPE_STRING 0
+#define BOF_TYPE_NULL 1
+#define BOF_TYPE_BLOB 2
+#define BOF_TYPE_OBJECT 3
+#define BOF_TYPE_ARRAY 4
+#define BOF_TYPE_INT32 5
+
+struct bof;
+
+typedef struct bof {
+ struct bof **array;
+ unsigned centry;
+ unsigned nentry;
+ unsigned refcount;
+ FILE *file;
+ uint32_t type;
+ uint32_t size;
+ uint32_t array_size;
+ void *value;
+ long offset;
+} bof_t;
+
+extern int bof_file_flush(bof_t *root);
+extern bof_t *bof_file_new(const char *filename);
+extern int bof_object_dump(bof_t *object, const char *filename);
+
+/* object */
+extern bof_t *bof_object(void);
+extern bof_t *bof_object_get(bof_t *object, const char *keyname);
+extern int bof_object_set(bof_t *object, const char *keyname, bof_t *value);
+/* array */
+extern bof_t *bof_array(void);
+extern int bof_array_append(bof_t *array, bof_t *value);
+extern bof_t *bof_array_get(bof_t *bof, unsigned i);
+extern unsigned bof_array_size(bof_t *bof);
+/* blob */
+extern bof_t *bof_blob(unsigned size, void *value);
+extern unsigned bof_blob_size(bof_t *bof);
+extern void *bof_blob_value(bof_t *bof);
+/* string */
+extern bof_t *bof_string(const char *value);
+/* int32 */
+extern bof_t *bof_int32(int32_t value);
+extern int32_t bof_int32_value(bof_t *bof);
+/* common functions */
+extern void bof_decref(bof_t *bof);
+extern void bof_incref(bof_t *bof);
+extern bof_t *bof_load_file(const char *filename);
+extern int bof_dump_file(bof_t *bof, const char *filename);
+extern void bof_print(bof_t *bof);
+
+static inline int bof_is_object(bof_t *bof){return (bof->type == BOF_TYPE_OBJECT);}
+static inline int bof_is_blob(bof_t *bof){return (bof->type == BOF_TYPE_BLOB);}
+static inline int bof_is_null(bof_t *bof){return (bof->type == BOF_TYPE_NULL);}
+static inline int bof_is_int32(bof_t *bof){return (bof->type == BOF_TYPE_INT32);}
+static inline int bof_is_array(bof_t *bof){return (bof->type == BOF_TYPE_ARRAY);}
+static inline int bof_is_string(bof_t *bof){return (bof->type == BOF_TYPE_STRING);}
+
+#endif
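
A short sketch of how the serialization API above is meant to be driven (not part of the imported sources; it assumes you build inside the radeon tree where bof.h is available, and the /tmp path is arbitrary). Each node is written as a 12-byte type/size/array_size header followed by its payload, and containers take their own reference in *_set/*_append, so the creator drops its reference afterwards.

#include "bof.h"

static int bof_example(void)
{
	bof_t *root = bof_object();
	bof_t *id = bof_int32(0x9400);
	int r = -1;

	if (root && id && !bof_object_set(root, "chipset", id)) {
		r = bof_dump_file(root, "/tmp/example.bof");
		bof_print(root);	/* dump the tree to stderr */
	}
	bof_decref(id);		/* root keeps its own reference */
	bof_decref(root);
	return r;
}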
diff --git a/chromium/third_party/libdrm/src/radeon/libdrm_radeon.pc.in b/chromium/third_party/libdrm/src/radeon/libdrm_radeon.pc.in
new file mode 100644
index 00000000000..432993a376a
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/libdrm_radeon.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_radeon
+Description: Userspace interface to kernel DRM services for radeon
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -ldrm_radeon
+Cflags: -I${includedir} -I${includedir}/libdrm
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/radeon/r600_pci_ids.h b/chromium/third_party/libdrm/src/radeon/r600_pci_ids.h
new file mode 100644
index 00000000000..a3b2eac8f03
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/r600_pci_ids.h
@@ -0,0 +1,487 @@
+CHIPSET(0x9400, R600_9400, R600)
+CHIPSET(0x9401, R600_9401, R600)
+CHIPSET(0x9402, R600_9402, R600)
+CHIPSET(0x9403, R600_9403, R600)
+CHIPSET(0x9405, R600_9405, R600)
+CHIPSET(0x940A, R600_940A, R600)
+CHIPSET(0x940B, R600_940B, R600)
+CHIPSET(0x940F, R600_940F, R600)
+
+CHIPSET(0x94C0, RV610_94C0, RV610)
+CHIPSET(0x94C1, RV610_94C1, RV610)
+CHIPSET(0x94C3, RV610_94C3, RV610)
+CHIPSET(0x94C4, RV610_94C4, RV610)
+CHIPSET(0x94C5, RV610_94C5, RV610)
+CHIPSET(0x94C6, RV610_94C6, RV610)
+CHIPSET(0x94C7, RV610_94C7, RV610)
+CHIPSET(0x94C8, RV610_94C8, RV610)
+CHIPSET(0x94C9, RV610_94C9, RV610)
+CHIPSET(0x94CB, RV610_94CB, RV610)
+CHIPSET(0x94CC, RV610_94CC, RV610)
+CHIPSET(0x94CD, RV610_94CD, RV610)
+
+CHIPSET(0x9580, RV630_9580, RV630)
+CHIPSET(0x9581, RV630_9581, RV630)
+CHIPSET(0x9583, RV630_9583, RV630)
+CHIPSET(0x9586, RV630_9586, RV630)
+CHIPSET(0x9587, RV630_9587, RV630)
+CHIPSET(0x9588, RV630_9588, RV630)
+CHIPSET(0x9589, RV630_9589, RV630)
+CHIPSET(0x958A, RV630_958A, RV630)
+CHIPSET(0x958B, RV630_958B, RV630)
+CHIPSET(0x958C, RV630_958C, RV630)
+CHIPSET(0x958D, RV630_958D, RV630)
+CHIPSET(0x958E, RV630_958E, RV630)
+CHIPSET(0x958F, RV630_958F, RV630)
+
+CHIPSET(0x9500, RV670_9500, RV670)
+CHIPSET(0x9501, RV670_9501, RV670)
+CHIPSET(0x9504, RV670_9504, RV670)
+CHIPSET(0x9505, RV670_9505, RV670)
+CHIPSET(0x9506, RV670_9506, RV670)
+CHIPSET(0x9507, RV670_9507, RV670)
+CHIPSET(0x9508, RV670_9508, RV670)
+CHIPSET(0x9509, RV670_9509, RV670)
+CHIPSET(0x950F, RV670_950F, RV670)
+CHIPSET(0x9511, RV670_9511, RV670)
+CHIPSET(0x9515, RV670_9515, RV670)
+CHIPSET(0x9517, RV670_9517, RV670)
+CHIPSET(0x9519, RV670_9519, RV670)
+
+CHIPSET(0x95C0, RV620_95C0, RV620)
+CHIPSET(0x95C2, RV620_95C2, RV620)
+CHIPSET(0x95C4, RV620_95C4, RV620)
+CHIPSET(0x95C5, RV620_95C5, RV620)
+CHIPSET(0x95C6, RV620_95C6, RV620)
+CHIPSET(0x95C7, RV620_95C7, RV620)
+CHIPSET(0x95C9, RV620_95C9, RV620)
+CHIPSET(0x95CC, RV620_95CC, RV620)
+CHIPSET(0x95CD, RV620_95CD, RV620)
+CHIPSET(0x95CE, RV620_95CE, RV620)
+CHIPSET(0x95CF, RV620_95CF, RV620)
+
+CHIPSET(0x9590, RV635_9590, RV635)
+CHIPSET(0x9591, RV635_9591, RV635)
+CHIPSET(0x9593, RV635_9593, RV635)
+CHIPSET(0x9595, RV635_9595, RV635)
+CHIPSET(0x9596, RV635_9596, RV635)
+CHIPSET(0x9597, RV635_9597, RV635)
+CHIPSET(0x9598, RV635_9598, RV635)
+CHIPSET(0x9599, RV635_9599, RV635)
+CHIPSET(0x959B, RV635_959B, RV635)
+
+CHIPSET(0x9610, RS780_9610, RS780)
+CHIPSET(0x9611, RS780_9611, RS780)
+CHIPSET(0x9612, RS780_9612, RS780)
+CHIPSET(0x9613, RS780_9613, RS780)
+CHIPSET(0x9614, RS780_9614, RS780)
+CHIPSET(0x9615, RS780_9615, RS780)
+CHIPSET(0x9616, RS780_9616, RS780)
+
+CHIPSET(0x9710, RS880_9710, RS880)
+CHIPSET(0x9711, RS880_9711, RS880)
+CHIPSET(0x9712, RS880_9712, RS880)
+CHIPSET(0x9713, RS880_9713, RS880)
+CHIPSET(0x9714, RS880_9714, RS880)
+CHIPSET(0x9715, RS880_9715, RS880)
+
+CHIPSET(0x9440, RV770_9440, RV770)
+CHIPSET(0x9441, RV770_9441, RV770)
+CHIPSET(0x9442, RV770_9442, RV770)
+CHIPSET(0x9443, RV770_9443, RV770)
+CHIPSET(0x9444, RV770_9444, RV770)
+CHIPSET(0x9446, RV770_9446, RV770)
+CHIPSET(0x944A, RV770_944A, RV770)
+CHIPSET(0x944B, RV770_944B, RV770)
+CHIPSET(0x944C, RV770_944C, RV770)
+CHIPSET(0x944E, RV770_944E, RV770)
+CHIPSET(0x9450, RV770_9450, RV770)
+CHIPSET(0x9452, RV770_9452, RV770)
+CHIPSET(0x9456, RV770_9456, RV770)
+CHIPSET(0x945A, RV770_945A, RV770)
+CHIPSET(0x945B, RV770_945B, RV770)
+CHIPSET(0x945E, RV770_945E, RV770)
+CHIPSET(0x9460, RV790_9460, RV770)
+CHIPSET(0x9462, RV790_9462, RV770)
+CHIPSET(0x946A, RV770_946A, RV770)
+CHIPSET(0x946B, RV770_946B, RV770)
+CHIPSET(0x947A, RV770_947A, RV770)
+CHIPSET(0x947B, RV770_947B, RV770)
+
+CHIPSET(0x9480, RV730_9480, RV730)
+CHIPSET(0x9487, RV730_9487, RV730)
+CHIPSET(0x9488, RV730_9488, RV730)
+CHIPSET(0x9489, RV730_9489, RV730)
+CHIPSET(0x948A, RV730_948A, RV730)
+CHIPSET(0x948F, RV730_948F, RV730)
+CHIPSET(0x9490, RV730_9490, RV730)
+CHIPSET(0x9491, RV730_9491, RV730)
+CHIPSET(0x9495, RV730_9495, RV730)
+CHIPSET(0x9498, RV730_9498, RV730)
+CHIPSET(0x949C, RV730_949C, RV730)
+CHIPSET(0x949E, RV730_949E, RV730)
+CHIPSET(0x949F, RV730_949F, RV730)
+
+CHIPSET(0x9540, RV710_9540, RV710)
+CHIPSET(0x9541, RV710_9541, RV710)
+CHIPSET(0x9542, RV710_9542, RV710)
+CHIPSET(0x954E, RV710_954E, RV710)
+CHIPSET(0x954F, RV710_954F, RV710)
+CHIPSET(0x9552, RV710_9552, RV710)
+CHIPSET(0x9553, RV710_9553, RV710)
+CHIPSET(0x9555, RV710_9555, RV710)
+CHIPSET(0x9557, RV710_9557, RV710)
+CHIPSET(0x955F, RV710_955F, RV710)
+
+CHIPSET(0x94A0, RV740_94A0, RV740)
+CHIPSET(0x94A1, RV740_94A1, RV740)
+CHIPSET(0x94A3, RV740_94A3, RV740)
+CHIPSET(0x94B1, RV740_94B1, RV740)
+CHIPSET(0x94B3, RV740_94B3, RV740)
+CHIPSET(0x94B4, RV740_94B4, RV740)
+CHIPSET(0x94B5, RV740_94B5, RV740)
+CHIPSET(0x94B9, RV740_94B9, RV740)
+
+CHIPSET(0x68E0, CEDAR_68E0, CEDAR)
+CHIPSET(0x68E1, CEDAR_68E1, CEDAR)
+CHIPSET(0x68E4, CEDAR_68E4, CEDAR)
+CHIPSET(0x68E5, CEDAR_68E5, CEDAR)
+CHIPSET(0x68E8, CEDAR_68E8, CEDAR)
+CHIPSET(0x68E9, CEDAR_68E9, CEDAR)
+CHIPSET(0x68F1, CEDAR_68F1, CEDAR)
+CHIPSET(0x68F2, CEDAR_68F2, CEDAR)
+CHIPSET(0x68F8, CEDAR_68F8, CEDAR)
+CHIPSET(0x68F9, CEDAR_68F9, CEDAR)
+CHIPSET(0x68FA, CEDAR_68FA, CEDAR)
+CHIPSET(0x68FE, CEDAR_68FE, CEDAR)
+
+CHIPSET(0x68C0, REDWOOD_68C0, REDWOOD)
+CHIPSET(0x68C1, REDWOOD_68C1, REDWOOD)
+CHIPSET(0x68C7, REDWOOD_68C7, REDWOOD)
+CHIPSET(0x68C8, REDWOOD_68C8, REDWOOD)
+CHIPSET(0x68C9, REDWOOD_68C9, REDWOOD)
+CHIPSET(0x68D8, REDWOOD_68D8, REDWOOD)
+CHIPSET(0x68D9, REDWOOD_68D9, REDWOOD)
+CHIPSET(0x68DA, REDWOOD_68DA, REDWOOD)
+CHIPSET(0x68DE, REDWOOD_68DE, REDWOOD)
+
+CHIPSET(0x68A0, JUNIPER_68A0, JUNIPER)
+CHIPSET(0x68A1, JUNIPER_68A1, JUNIPER)
+CHIPSET(0x68A8, JUNIPER_68A8, JUNIPER)
+CHIPSET(0x68A9, JUNIPER_68A9, JUNIPER)
+CHIPSET(0x68B0, JUNIPER_68B0, JUNIPER)
+CHIPSET(0x68B8, JUNIPER_68B8, JUNIPER)
+CHIPSET(0x68B9, JUNIPER_68B9, JUNIPER)
+CHIPSET(0x68BA, JUNIPER_68BA, JUNIPER)
+CHIPSET(0x68BE, JUNIPER_68BE, JUNIPER)
+CHIPSET(0x68BF, JUNIPER_68BF, JUNIPER)
+
+CHIPSET(0x6880, CYPRESS_6880, CYPRESS)
+CHIPSET(0x6888, CYPRESS_6888, CYPRESS)
+CHIPSET(0x6889, CYPRESS_6889, CYPRESS)
+CHIPSET(0x688A, CYPRESS_688A, CYPRESS)
+CHIPSET(0x688C, CYPRESS_688C, CYPRESS)
+CHIPSET(0x688D, CYPRESS_688D, CYPRESS)
+CHIPSET(0x6898, CYPRESS_6898, CYPRESS)
+CHIPSET(0x6899, CYPRESS_6899, CYPRESS)
+CHIPSET(0x689B, CYPRESS_689B, CYPRESS)
+CHIPSET(0x689E, CYPRESS_689E, CYPRESS)
+
+CHIPSET(0x689C, HEMLOCK_689C, HEMLOCK)
+CHIPSET(0x689D, HEMLOCK_689D, HEMLOCK)
+
+CHIPSET(0x9802, PALM_9802, PALM)
+CHIPSET(0x9803, PALM_9803, PALM)
+CHIPSET(0x9804, PALM_9804, PALM)
+CHIPSET(0x9805, PALM_9805, PALM)
+CHIPSET(0x9806, PALM_9806, PALM)
+CHIPSET(0x9807, PALM_9807, PALM)
+CHIPSET(0x9808, PALM_9808, PALM)
+CHIPSET(0x9809, PALM_9809, PALM)
+CHIPSET(0x980A, PALM_980A, PALM)
+
+CHIPSET(0x9640, SUMO_9640, SUMO)
+CHIPSET(0x9641, SUMO_9641, SUMO)
+CHIPSET(0x9642, SUMO2_9642, SUMO2)
+CHIPSET(0x9643, SUMO2_9643, SUMO2)
+CHIPSET(0x9644, SUMO2_9644, SUMO2)
+CHIPSET(0x9645, SUMO2_9645, SUMO2)
+CHIPSET(0x9647, SUMO_9647, SUMO)
+CHIPSET(0x9648, SUMO_9648, SUMO)
+CHIPSET(0x9649, SUMO2_9649, SUMO2)
+CHIPSET(0x964a, SUMO_964A, SUMO)
+CHIPSET(0x964b, SUMO_964B, SUMO)
+CHIPSET(0x964c, SUMO_964C, SUMO)
+CHIPSET(0x964e, SUMO_964E, SUMO)
+CHIPSET(0x964f, SUMO_964F, SUMO)
+
+CHIPSET(0x6700, CAYMAN_6700, CAYMAN)
+CHIPSET(0x6701, CAYMAN_6701, CAYMAN)
+CHIPSET(0x6702, CAYMAN_6702, CAYMAN)
+CHIPSET(0x6703, CAYMAN_6703, CAYMAN)
+CHIPSET(0x6704, CAYMAN_6704, CAYMAN)
+CHIPSET(0x6705, CAYMAN_6705, CAYMAN)
+CHIPSET(0x6706, CAYMAN_6706, CAYMAN)
+CHIPSET(0x6707, CAYMAN_6707, CAYMAN)
+CHIPSET(0x6708, CAYMAN_6708, CAYMAN)
+CHIPSET(0x6709, CAYMAN_6709, CAYMAN)
+CHIPSET(0x6718, CAYMAN_6718, CAYMAN)
+CHIPSET(0x6719, CAYMAN_6719, CAYMAN)
+CHIPSET(0x671C, CAYMAN_671C, CAYMAN)
+CHIPSET(0x671D, CAYMAN_671D, CAYMAN)
+CHIPSET(0x671F, CAYMAN_671F, CAYMAN)
+
+CHIPSET(0x6720, BARTS_6720, BARTS)
+CHIPSET(0x6721, BARTS_6721, BARTS)
+CHIPSET(0x6722, BARTS_6722, BARTS)
+CHIPSET(0x6723, BARTS_6723, BARTS)
+CHIPSET(0x6724, BARTS_6724, BARTS)
+CHIPSET(0x6725, BARTS_6725, BARTS)
+CHIPSET(0x6726, BARTS_6726, BARTS)
+CHIPSET(0x6727, BARTS_6727, BARTS)
+CHIPSET(0x6728, BARTS_6728, BARTS)
+CHIPSET(0x6729, BARTS_6729, BARTS)
+CHIPSET(0x6738, BARTS_6738, BARTS)
+CHIPSET(0x6739, BARTS_6739, BARTS)
+CHIPSET(0x673E, BARTS_673E, BARTS)
+
+CHIPSET(0x6740, TURKS_6740, TURKS)
+CHIPSET(0x6741, TURKS_6741, TURKS)
+CHIPSET(0x6742, TURKS_6742, TURKS)
+CHIPSET(0x6743, TURKS_6743, TURKS)
+CHIPSET(0x6744, TURKS_6744, TURKS)
+CHIPSET(0x6745, TURKS_6745, TURKS)
+CHIPSET(0x6746, TURKS_6746, TURKS)
+CHIPSET(0x6747, TURKS_6747, TURKS)
+CHIPSET(0x6748, TURKS_6748, TURKS)
+CHIPSET(0x6749, TURKS_6749, TURKS)
+CHIPSET(0x674A, TURKS_674A, TURKS)
+CHIPSET(0x6750, TURKS_6750, TURKS)
+CHIPSET(0x6751, TURKS_6751, TURKS)
+CHIPSET(0x6758, TURKS_6758, TURKS)
+CHIPSET(0x6759, TURKS_6759, TURKS)
+CHIPSET(0x675B, TURKS_675B, TURKS)
+CHIPSET(0x675D, TURKS_675D, TURKS)
+CHIPSET(0x675F, TURKS_675F, TURKS)
+CHIPSET(0x6840, TURKS_6840, TURKS)
+CHIPSET(0x6841, TURKS_6841, TURKS)
+CHIPSET(0x6842, TURKS_6842, TURKS)
+CHIPSET(0x6843, TURKS_6843, TURKS)
+CHIPSET(0x6849, TURKS_6849, TURKS)
+CHIPSET(0x6850, TURKS_6850, TURKS)
+CHIPSET(0x6858, TURKS_6858, TURKS)
+CHIPSET(0x6859, TURKS_6859, TURKS)
+
+CHIPSET(0x6760, CAICOS_6760, CAICOS)
+CHIPSET(0x6761, CAICOS_6761, CAICOS)
+CHIPSET(0x6762, CAICOS_6762, CAICOS)
+CHIPSET(0x6763, CAICOS_6763, CAICOS)
+CHIPSET(0x6764, CAICOS_6764, CAICOS)
+CHIPSET(0x6765, CAICOS_6765, CAICOS)
+CHIPSET(0x6766, CAICOS_6766, CAICOS)
+CHIPSET(0x6767, CAICOS_6767, CAICOS)
+CHIPSET(0x6768, CAICOS_6768, CAICOS)
+CHIPSET(0x6770, CAICOS_6770, CAICOS)
+CHIPSET(0x6771, CAICOS_6771, CAICOS)
+CHIPSET(0x6772, CAICOS_6772, CAICOS)
+CHIPSET(0x6778, CAICOS_6778, CAICOS)
+CHIPSET(0x6779, CAICOS_6779, CAICOS)
+CHIPSET(0x677B, CAICOS_677B, CAICOS)
+
+CHIPSET(0x9900, ARUBA_9900, ARUBA)
+CHIPSET(0x9901, ARUBA_9901, ARUBA)
+CHIPSET(0x9903, ARUBA_9903, ARUBA)
+CHIPSET(0x9904, ARUBA_9904, ARUBA)
+CHIPSET(0x9905, ARUBA_9905, ARUBA)
+CHIPSET(0x9906, ARUBA_9906, ARUBA)
+CHIPSET(0x9907, ARUBA_9907, ARUBA)
+CHIPSET(0x9908, ARUBA_9908, ARUBA)
+CHIPSET(0x9909, ARUBA_9909, ARUBA)
+CHIPSET(0x990A, ARUBA_990A, ARUBA)
+CHIPSET(0x990B, ARUBA_990B, ARUBA)
+CHIPSET(0x990C, ARUBA_990C, ARUBA)
+CHIPSET(0x990D, ARUBA_990D, ARUBA)
+CHIPSET(0x990E, ARUBA_990E, ARUBA)
+CHIPSET(0x990F, ARUBA_990F, ARUBA)
+CHIPSET(0x9910, ARUBA_9910, ARUBA)
+CHIPSET(0x9913, ARUBA_9913, ARUBA)
+CHIPSET(0x9917, ARUBA_9917, ARUBA)
+CHIPSET(0x9918, ARUBA_9918, ARUBA)
+CHIPSET(0x9919, ARUBA_9919, ARUBA)
+CHIPSET(0x9990, ARUBA_9990, ARUBA)
+CHIPSET(0x9991, ARUBA_9991, ARUBA)
+CHIPSET(0x9992, ARUBA_9992, ARUBA)
+CHIPSET(0x9993, ARUBA_9993, ARUBA)
+CHIPSET(0x9994, ARUBA_9994, ARUBA)
+CHIPSET(0x9995, ARUBA_9995, ARUBA)
+CHIPSET(0x9996, ARUBA_9996, ARUBA)
+CHIPSET(0x9997, ARUBA_9997, ARUBA)
+CHIPSET(0x9998, ARUBA_9998, ARUBA)
+CHIPSET(0x9999, ARUBA_9999, ARUBA)
+CHIPSET(0x999A, ARUBA_999A, ARUBA)
+CHIPSET(0x999B, ARUBA_999B, ARUBA)
+CHIPSET(0x999C, ARUBA_999C, ARUBA)
+CHIPSET(0x999D, ARUBA_999D, ARUBA)
+CHIPSET(0x99A0, ARUBA_99A0, ARUBA)
+CHIPSET(0x99A2, ARUBA_99A2, ARUBA)
+CHIPSET(0x99A4, ARUBA_99A4, ARUBA)
+
+CHIPSET(0x6780, TAHITI_6780, TAHITI)
+CHIPSET(0x6784, TAHITI_6784, TAHITI)
+CHIPSET(0x6788, TAHITI_6788, TAHITI)
+CHIPSET(0x678A, TAHITI_678A, TAHITI)
+CHIPSET(0x6790, TAHITI_6790, TAHITI)
+CHIPSET(0x6791, TAHITI_6791, TAHITI)
+CHIPSET(0x6792, TAHITI_6792, TAHITI)
+CHIPSET(0x6798, TAHITI_6798, TAHITI)
+CHIPSET(0x6799, TAHITI_6799, TAHITI)
+CHIPSET(0x679A, TAHITI_679A, TAHITI)
+CHIPSET(0x679B, TAHITI_679B, TAHITI)
+CHIPSET(0x679E, TAHITI_679E, TAHITI)
+CHIPSET(0x679F, TAHITI_679F, TAHITI)
+
+CHIPSET(0x6800, PITCAIRN_6800, PITCAIRN)
+CHIPSET(0x6801, PITCAIRN_6801, PITCAIRN)
+CHIPSET(0x6802, PITCAIRN_6802, PITCAIRN)
+CHIPSET(0x6806, PITCAIRN_6806, PITCAIRN)
+CHIPSET(0x6808, PITCAIRN_6808, PITCAIRN)
+CHIPSET(0x6809, PITCAIRN_6809, PITCAIRN)
+CHIPSET(0x6810, PITCAIRN_6810, PITCAIRN)
+CHIPSET(0x6811, PITCAIRN_6811, PITCAIRN)
+CHIPSET(0x6816, PITCAIRN_6816, PITCAIRN)
+CHIPSET(0x6817, PITCAIRN_6817, PITCAIRN)
+CHIPSET(0x6818, PITCAIRN_6818, PITCAIRN)
+CHIPSET(0x6819, PITCAIRN_6819, PITCAIRN)
+CHIPSET(0x684C, PITCAIRN_684C, PITCAIRN)
+
+CHIPSET(0x6820, VERDE_6820, VERDE)
+CHIPSET(0x6821, VERDE_6821, VERDE)
+CHIPSET(0x6822, VERDE_6822, VERDE)
+CHIPSET(0x6823, VERDE_6823, VERDE)
+CHIPSET(0x6824, VERDE_6824, VERDE)
+CHIPSET(0x6825, VERDE_6825, VERDE)
+CHIPSET(0x6826, VERDE_6826, VERDE)
+CHIPSET(0x6827, VERDE_6827, VERDE)
+CHIPSET(0x6828, VERDE_6828, VERDE)
+CHIPSET(0x6829, VERDE_6829, VERDE)
+CHIPSET(0x682A, VERDE_682A, VERDE)
+CHIPSET(0x682B, VERDE_682B, VERDE)
+CHIPSET(0x682C, VERDE_682C, VERDE)
+CHIPSET(0x682D, VERDE_682D, VERDE)
+CHIPSET(0x682F, VERDE_682F, VERDE)
+CHIPSET(0x6830, VERDE_6830, VERDE)
+CHIPSET(0x6831, VERDE_6831, VERDE)
+CHIPSET(0x6835, VERDE_6835, VERDE)
+CHIPSET(0x6837, VERDE_6837, VERDE)
+CHIPSET(0x6838, VERDE_6838, VERDE)
+CHIPSET(0x6839, VERDE_6839, VERDE)
+CHIPSET(0x683B, VERDE_683B, VERDE)
+CHIPSET(0x683D, VERDE_683D, VERDE)
+CHIPSET(0x683F, VERDE_683F, VERDE)
+
+CHIPSET(0x6600, OLAND_6600, OLAND)
+CHIPSET(0x6601, OLAND_6601, OLAND)
+CHIPSET(0x6602, OLAND_6602, OLAND)
+CHIPSET(0x6603, OLAND_6603, OLAND)
+CHIPSET(0x6604, OLAND_6604, OLAND)
+CHIPSET(0x6605, OLAND_6605, OLAND)
+CHIPSET(0x6606, OLAND_6606, OLAND)
+CHIPSET(0x6607, OLAND_6607, OLAND)
+CHIPSET(0x6608, OLAND_6608, OLAND)
+CHIPSET(0x6610, OLAND_6610, OLAND)
+CHIPSET(0x6611, OLAND_6611, OLAND)
+CHIPSET(0x6613, OLAND_6613, OLAND)
+CHIPSET(0x6617, OLAND_6617, OLAND)
+CHIPSET(0x6620, OLAND_6620, OLAND)
+CHIPSET(0x6621, OLAND_6621, OLAND)
+CHIPSET(0x6623, OLAND_6623, OLAND)
+CHIPSET(0x6631, OLAND_6631, OLAND)
+
+CHIPSET(0x6660, HAINAN_6660, HAINAN)
+CHIPSET(0x6663, HAINAN_6663, HAINAN)
+CHIPSET(0x6664, HAINAN_6664, HAINAN)
+CHIPSET(0x6665, HAINAN_6665, HAINAN)
+CHIPSET(0x6667, HAINAN_6667, HAINAN)
+CHIPSET(0x666F, HAINAN_666F, HAINAN)
+
+CHIPSET(0x6640, BONAIRE_6640, BONAIRE)
+CHIPSET(0x6641, BONAIRE_6641, BONAIRE)
+CHIPSET(0x6646, BONAIRE_6646, BONAIRE)
+CHIPSET(0x6647, BONAIRE_6647, BONAIRE)
+CHIPSET(0x6649, BONAIRE_6649, BONAIRE)
+CHIPSET(0x6650, BONAIRE_6650, BONAIRE)
+CHIPSET(0x6651, BONAIRE_6651, BONAIRE)
+CHIPSET(0x6658, BONAIRE_6658, BONAIRE)
+CHIPSET(0x665C, BONAIRE_665C, BONAIRE)
+CHIPSET(0x665D, BONAIRE_665D, BONAIRE)
+CHIPSET(0x665F, BONAIRE_665F, BONAIRE)
+
+CHIPSET(0x9830, KABINI_9830, KABINI)
+CHIPSET(0x9831, KABINI_9831, KABINI)
+CHIPSET(0x9832, KABINI_9832, KABINI)
+CHIPSET(0x9833, KABINI_9833, KABINI)
+CHIPSET(0x9834, KABINI_9834, KABINI)
+CHIPSET(0x9835, KABINI_9835, KABINI)
+CHIPSET(0x9836, KABINI_9836, KABINI)
+CHIPSET(0x9837, KABINI_9837, KABINI)
+CHIPSET(0x9838, KABINI_9838, KABINI)
+CHIPSET(0x9839, KABINI_9839, KABINI)
+CHIPSET(0x983A, KABINI_983A, KABINI)
+CHIPSET(0x983B, KABINI_983B, KABINI)
+CHIPSET(0x983C, KABINI_983C, KABINI)
+CHIPSET(0x983D, KABINI_983D, KABINI)
+CHIPSET(0x983E, KABINI_983E, KABINI)
+CHIPSET(0x983F, KABINI_983F, KABINI)
+
+CHIPSET(0x9850, MULLINS_9850, MULLINS)
+CHIPSET(0x9851, MULLINS_9851, MULLINS)
+CHIPSET(0x9852, MULLINS_9852, MULLINS)
+CHIPSET(0x9853, MULLINS_9853, MULLINS)
+CHIPSET(0x9854, MULLINS_9854, MULLINS)
+CHIPSET(0x9855, MULLINS_9855, MULLINS)
+CHIPSET(0x9856, MULLINS_9856, MULLINS)
+CHIPSET(0x9857, MULLINS_9857, MULLINS)
+CHIPSET(0x9858, MULLINS_9858, MULLINS)
+CHIPSET(0x9859, MULLINS_9859, MULLINS)
+CHIPSET(0x985A, MULLINS_985A, MULLINS)
+CHIPSET(0x985B, MULLINS_985B, MULLINS)
+CHIPSET(0x985C, MULLINS_985C, MULLINS)
+CHIPSET(0x985D, MULLINS_985D, MULLINS)
+CHIPSET(0x985E, MULLINS_985E, MULLINS)
+CHIPSET(0x985F, MULLINS_985F, MULLINS)
+
+CHIPSET(0x1304, KAVERI_1304, KAVERI)
+CHIPSET(0x1305, KAVERI_1305, KAVERI)
+CHIPSET(0x1306, KAVERI_1306, KAVERI)
+CHIPSET(0x1307, KAVERI_1307, KAVERI)
+CHIPSET(0x1309, KAVERI_1309, KAVERI)
+CHIPSET(0x130A, KAVERI_130A, KAVERI)
+CHIPSET(0x130B, KAVERI_130B, KAVERI)
+CHIPSET(0x130C, KAVERI_130C, KAVERI)
+CHIPSET(0x130D, KAVERI_130D, KAVERI)
+CHIPSET(0x130E, KAVERI_130E, KAVERI)
+CHIPSET(0x130F, KAVERI_130F, KAVERI)
+CHIPSET(0x1310, KAVERI_1310, KAVERI)
+CHIPSET(0x1311, KAVERI_1311, KAVERI)
+CHIPSET(0x1312, KAVERI_1312, KAVERI)
+CHIPSET(0x1313, KAVERI_1313, KAVERI)
+CHIPSET(0x1315, KAVERI_1315, KAVERI)
+CHIPSET(0x1316, KAVERI_1316, KAVERI)
+CHIPSET(0x1317, KAVERI_1317, KAVERI)
+CHIPSET(0x1318, KAVERI_1318, KAVERI)
+CHIPSET(0x131B, KAVERI_131B, KAVERI)
+CHIPSET(0x131C, KAVERI_131C, KAVERI)
+CHIPSET(0x131D, KAVERI_131D, KAVERI)
+
+CHIPSET(0x67A0, HAWAII_67A0, HAWAII)
+CHIPSET(0x67A1, HAWAII_67A1, HAWAII)
+CHIPSET(0x67A2, HAWAII_67A2, HAWAII)
+CHIPSET(0x67A8, HAWAII_67A8, HAWAII)
+CHIPSET(0x67A9, HAWAII_67A9, HAWAII)
+CHIPSET(0x67AA, HAWAII_67AA, HAWAII)
+CHIPSET(0x67B0, HAWAII_67B0, HAWAII)
+CHIPSET(0x67B1, HAWAII_67B1, HAWAII)
+CHIPSET(0x67B8, HAWAII_67B8, HAWAII)
+CHIPSET(0x67B9, HAWAII_67B9, HAWAII)
+CHIPSET(0x67BA, HAWAII_67BA, HAWAII)
+CHIPSET(0x67BE, HAWAII_67BE, HAWAII)
diff --git a/chromium/third_party/libdrm/src/radeon/radeon-symbol-check b/chromium/third_party/libdrm/src/radeon/radeon-symbol-check
new file mode 100755
index 00000000000..0bf2ffcbeeb
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon-symbol-check
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# The following symbols (past the first five) are taken from the public headers.
+# A list of the latter should be available in Makefile.sources/LIBDRM_RADEON_H_FILES.
+
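+# Each dynamic symbol exported by the library is matched against the
+# whitelist below (read from the here-document); symbols not on the list
+# are collected in FUNCS.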
+FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_radeon.so} | awk '{print $3}'| while read func; do
+( grep -q "^$func$" || echo $func ) <<EOF
+__bss_start
+_edata
+_end
+_fini
+_init
+radeon_bo_debug
+radeon_bo_get_handle
+radeon_bo_get_src_domain
+radeon_bo_get_tiling
+radeon_bo_is_busy
+radeon_bo_is_referenced_by_cs
+radeon_bo_is_static
+radeon_bo_manager_gem_ctor
+radeon_bo_manager_gem_dtor
+radeon_bo_map
+radeon_bo_open
+radeon_bo_ref
+radeon_bo_set_tiling
+radeon_bo_unmap
+radeon_bo_unref
+radeon_bo_wait
+radeon_cs_begin
+radeon_cs_create
+radeon_cs_destroy
+radeon_cs_emit
+radeon_cs_end
+radeon_cs_erase
+radeon_cs_get_id
+radeon_cs_manager_gem_ctor
+radeon_cs_manager_gem_dtor
+radeon_cs_need_flush
+radeon_cs_print
+radeon_cs_set_limit
+radeon_cs_space_add_persistent_bo
+radeon_cs_space_check
+radeon_cs_space_check_with_bo
+radeon_cs_space_reset_bos
+radeon_cs_space_set_flush
+radeon_cs_write_reloc
+radeon_gem_bo_open_prime
+radeon_gem_get_kernel_name
+radeon_gem_get_reloc_in_cs
+radeon_gem_name_bo
+radeon_gem_prime_share_bo
+radeon_gem_set_domain
+radeon_surface_best
+radeon_surface_init
+radeon_surface_manager_free
+radeon_surface_manager_new
+EOF
+done)
+
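+# Print any unexpected symbols, then exit non-zero if there were any.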
+test ! -n "$FUNCS" || echo $FUNCS
+test ! -n "$FUNCS"
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_bo.c b/chromium/third_party/libdrm/src/radeon/radeon_bo.c
new file mode 100644
index 00000000000..447f9280341
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_bo.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright © 2008 Dave Airlie
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Dave Airlie
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include <libdrm_macros.h>
+#include <radeon_bo.h>
+#include <radeon_bo_int.h>
+
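+/* This file is a thin dispatch layer: each call forwards to the bo
+ * manager's function table (see radeon_bo_int.h). */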
+void radeon_bo_debug(struct radeon_bo *bo, const char *op)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+
+ fprintf(stderr, "%s %p 0x%08X 0x%08X 0x%08X\n",
+ op, bo, bo->handle, boi->size, boi->cref);
+}
+
+struct radeon_bo *
+radeon_bo_open(struct radeon_bo_manager *bom, uint32_t handle, uint32_t size,
+ uint32_t alignment, uint32_t domains, uint32_t flags)
+{
+ struct radeon_bo *bo;
+ bo = bom->funcs->bo_open(bom, handle, size, alignment, domains, flags);
+ return bo;
+}
+
+void radeon_bo_ref(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ boi->cref++;
+ boi->bom->funcs->bo_ref(boi);
+}
+
+struct radeon_bo *radeon_bo_unref(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ if (bo == NULL)
+ return NULL;
+
+ boi->cref--;
+ return boi->bom->funcs->bo_unref(boi);
+}
+
+int radeon_bo_map(struct radeon_bo *bo, int write)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_map(boi, write);
+}
+
+int radeon_bo_unmap(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_unmap(boi);
+}
+
+int radeon_bo_wait(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ if (!boi->bom->funcs->bo_wait)
+ return 0;
+ return boi->bom->funcs->bo_wait(boi);
+}
+
+int radeon_bo_is_busy(struct radeon_bo *bo, uint32_t *domain)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_is_busy(boi, domain);
+}
+
+int
+radeon_bo_set_tiling(struct radeon_bo *bo,
+ uint32_t tiling_flags, uint32_t pitch)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_set_tiling(boi, tiling_flags, pitch);
+}
+
+int
+radeon_bo_get_tiling(struct radeon_bo *bo,
+ uint32_t *tiling_flags, uint32_t *pitch)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_get_tiling(boi, tiling_flags, pitch);
+}
+
+int radeon_bo_is_static(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ if (boi->bom->funcs->bo_is_static)
+ return boi->bom->funcs->bo_is_static(boi);
+ return 0;
+}
+
+int
+radeon_bo_is_referenced_by_cs(struct radeon_bo *bo, struct radeon_cs *cs)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
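+ /* note: the cs argument is unused; any reference beyond the caller's
+ * own is assumed to be held by a cs */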
+ return boi->cref > 1;
+}
+
+uint32_t radeon_bo_get_handle(struct radeon_bo *bo)
+{
+ return bo->handle;
+}
+
+uint32_t radeon_bo_get_src_domain(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ uint32_t src_domain;
+
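+ /* space_accounted packs the write domain in the low 16 bits and the
+ * read domains in the high 16 bits; the write domain wins if set */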
+ src_domain = boi->space_accounted & 0xffff;
+ if (!src_domain)
+ src_domain = boi->space_accounted >> 16;
+
+ return src_domain;
+}
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_bo.h b/chromium/third_party/libdrm/src/radeon/radeon_bo.h
new file mode 100644
index 00000000000..37478a0d702
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_bo.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_BO_H
+#define RADEON_BO_H
+
+#include <stdio.h>
+#include <stdint.h>
+
+/* bo object */
+#define RADEON_BO_FLAGS_MACRO_TILE 1
+#define RADEON_BO_FLAGS_MICRO_TILE 2
+#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
+
+struct radeon_bo_manager;
+struct radeon_cs;
+
+struct radeon_bo {
+ void *ptr;
+ uint32_t flags;
+ uint32_t handle;
+ uint32_t size;
+};
+
+void radeon_bo_debug(struct radeon_bo *bo, const char *op);
+
+struct radeon_bo *radeon_bo_open(struct radeon_bo_manager *bom,
+ uint32_t handle,
+ uint32_t size,
+ uint32_t alignment,
+ uint32_t domains,
+ uint32_t flags);
+
+void radeon_bo_ref(struct radeon_bo *bo);
+struct radeon_bo *radeon_bo_unref(struct radeon_bo *bo);
+int radeon_bo_map(struct radeon_bo *bo, int write);
+int radeon_bo_unmap(struct radeon_bo *bo);
+int radeon_bo_wait(struct radeon_bo *bo);
+int radeon_bo_is_busy(struct radeon_bo *bo, uint32_t *domain);
+int radeon_bo_set_tiling(struct radeon_bo *bo, uint32_t tiling_flags, uint32_t pitch);
+int radeon_bo_get_tiling(struct radeon_bo *bo, uint32_t *tiling_flags, uint32_t *pitch);
+int radeon_bo_is_static(struct radeon_bo *bo);
+int radeon_bo_is_referenced_by_cs(struct radeon_bo *bo, struct radeon_cs *cs);
+uint32_t radeon_bo_get_handle(struct radeon_bo *bo);
+uint32_t radeon_bo_get_src_domain(struct radeon_bo *bo);
+#endif
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_bo_gem.c b/chromium/third_party/libdrm/src/radeon/radeon_bo_gem.c
new file mode 100644
index 00000000000..fbd453d9c7e
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_bo_gem.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright © 2008 Dave Airlie
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Dave Airlie
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "xf86atomic.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_bo.h"
+#include "radeon_bo_int.h"
+#include "radeon_bo_gem.h"
+#include <fcntl.h>
+struct radeon_bo_gem {
+ struct radeon_bo_int base;
+ uint32_t name;
+ int map_count;
+ atomic_t reloc_in_cs;
+ void *priv_ptr;
+};
+
+struct bo_manager_gem {
+ struct radeon_bo_manager base;
+};
+
+static int bo_wait(struct radeon_bo_int *boi);
+
+static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
+ uint32_t handle,
+ uint32_t size,
+ uint32_t alignment,
+ uint32_t domains,
+ uint32_t flags)
+{
+ struct radeon_bo_gem *bo;
+ int r;
+
+ bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
+ if (bo == NULL) {
+ return NULL;
+ }
+
+ bo->base.bom = bom;
+ bo->base.handle = 0;
+ bo->base.size = size;
+ bo->base.alignment = alignment;
+ bo->base.domains = domains;
+ bo->base.flags = flags;
+ bo->base.ptr = NULL;
+ atomic_set(&bo->reloc_in_cs, 0);
+ bo->map_count = 0;
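+ /* a non-zero handle is a flink name: open the existing object
+ * instead of creating a new one */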
+ if (handle) {
+ struct drm_gem_open open_arg;
+
+ memset(&open_arg, 0, sizeof(open_arg));
+ open_arg.name = handle;
+ r = drmIoctl(bom->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ if (r != 0) {
+ free(bo);
+ return NULL;
+ }
+ bo->base.handle = open_arg.handle;
+ bo->base.size = open_arg.size;
+ bo->name = handle;
+ } else {
+ struct drm_radeon_gem_create args;
+
+ args.size = size;
+ args.alignment = alignment;
+ args.initial_domain = bo->base.domains;
+ args.flags = flags;
+ args.handle = 0;
+ r = drmCommandWriteRead(bom->fd, DRM_RADEON_GEM_CREATE,
+ &args, sizeof(args));
+ bo->base.handle = args.handle;
+ if (r) {
+ fprintf(stderr, "Failed to allocate :\n");
+ fprintf(stderr, " size : %d bytes\n", size);
+ fprintf(stderr, " alignment : %d bytes\n", alignment);
+ fprintf(stderr, " domains : %d\n", bo->base.domains);
+ free(bo);
+ return NULL;
+ }
+ }
+ radeon_bo_ref((struct radeon_bo*)bo);
+ return (struct radeon_bo*)bo;
+}
+
+static void bo_ref(struct radeon_bo_int *boi)
+{
+}
+
+static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
+ struct drm_gem_close args;
+
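+ /* references remain, keep the bo alive */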
+ if (boi->cref) {
+ return (struct radeon_bo *)boi;
+ }
+ if (bo_gem->priv_ptr) {
+ drm_munmap(bo_gem->priv_ptr, boi->size);
+ }
+
+ /* Zero out args to make valgrind happy */
+ memset(&args, 0, sizeof(args));
+
+ /* close object */
+ args.handle = boi->handle;
+ drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_CLOSE, &args);
+ memset(bo_gem, 0, sizeof(struct radeon_bo_gem));
+ free(bo_gem);
+ return NULL;
+}
+
+static int bo_map(struct radeon_bo_int *boi, int write)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
+ struct drm_radeon_gem_mmap args;
+ int r;
+ void *ptr;
+
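+ /* nested maps just bump map_count; the first map reuses the cached
+ * mmap (priv_ptr) when present and then waits for the bo to go idle */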
+ if (bo_gem->map_count++ != 0) {
+ return 0;
+ }
+ if (bo_gem->priv_ptr) {
+ goto wait;
+ }
+
+ boi->ptr = NULL;
+
+ /* Zero out args to make valgrind happy */
+ memset(&args, 0, sizeof(args));
+ args.handle = boi->handle;
+ args.offset = 0;
+ args.size = (uint64_t)boi->size;
+ r = drmCommandWriteRead(boi->bom->fd,
+ DRM_RADEON_GEM_MMAP,
+ &args,
+ sizeof(args));
+ if (r) {
+ fprintf(stderr, "error mapping %p 0x%08X (error = %d)\n",
+ boi, boi->handle, r);
+ return r;
+ }
+ ptr = drm_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED, boi->bom->fd, args.addr_ptr);
+ if (ptr == MAP_FAILED)
+ return -errno;
+ bo_gem->priv_ptr = ptr;
+wait:
+ boi->ptr = bo_gem->priv_ptr;
+ r = bo_wait(boi);
+ if (r)
+ return r;
+ return 0;
+}
+
+static int bo_unmap(struct radeon_bo_int *boi)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
+
+ if (--bo_gem->map_count > 0) {
+ return 0;
+ }
+ /* the mapping is kept cached in priv_ptr until the bo is destroyed; see bo_unref() */
+ boi->ptr = NULL;
+ return 0;
+}
+
+static int bo_wait(struct radeon_bo_int *boi)
+{
+ struct drm_radeon_gem_wait_idle args;
+ int ret;
+
+ /* Zero out args to make valgrind happy */
+ memset(&args, 0, sizeof(args));
+ args.handle = boi->handle;
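+ /* the wait-idle ioctl may return -EBUSY; retry until the bo is idle */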
+ do {
+ ret = drmCommandWrite(boi->bom->fd, DRM_RADEON_GEM_WAIT_IDLE,
+ &args, sizeof(args));
+ } while (ret == -EBUSY);
+ return ret;
+}
+
+static int bo_is_busy(struct radeon_bo_int *boi, uint32_t *domain)
+{
+ struct drm_radeon_gem_busy args;
+ int ret;
+
+ args.handle = boi->handle;
+ args.domain = 0;
+
+ ret = drmCommandWriteRead(boi->bom->fd, DRM_RADEON_GEM_BUSY,
+ &args, sizeof(args));
+
+ *domain = args.domain;
+ return ret;
+}
+
+static int bo_set_tiling(struct radeon_bo_int *boi, uint32_t tiling_flags,
+ uint32_t pitch)
+{
+ struct drm_radeon_gem_set_tiling args;
+ int r;
+
+ args.handle = boi->handle;
+ args.tiling_flags = tiling_flags;
+ args.pitch = pitch;
+
+ r = drmCommandWriteRead(boi->bom->fd,
+ DRM_RADEON_GEM_SET_TILING,
+ &args,
+ sizeof(args));
+ return r;
+}
+
+static int bo_get_tiling(struct radeon_bo_int *boi, uint32_t *tiling_flags,
+ uint32_t *pitch)
+{
+ struct drm_radeon_gem_set_tiling args = {};
+ int r;
+
+ args.handle = boi->handle;
+
+ r = drmCommandWriteRead(boi->bom->fd,
+ DRM_RADEON_GEM_GET_TILING,
+ &args,
+ sizeof(args));
+
+ if (r)
+ return r;
+
+ *tiling_flags = args.tiling_flags;
+ *pitch = args.pitch;
+ return r;
+}
+
+static const struct radeon_bo_funcs bo_gem_funcs = {
+ .bo_open = bo_open,
+ .bo_ref = bo_ref,
+ .bo_unref = bo_unref,
+ .bo_map = bo_map,
+ .bo_unmap = bo_unmap,
+ .bo_wait = bo_wait,
+ .bo_is_static = NULL,
+ .bo_set_tiling = bo_set_tiling,
+ .bo_get_tiling = bo_get_tiling,
+ .bo_is_busy = bo_is_busy,
+ .bo_is_referenced_by_cs = NULL,
+};
+
+struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
+{
+ struct bo_manager_gem *bomg;
+
+ bomg = (struct bo_manager_gem*)calloc(1, sizeof(struct bo_manager_gem));
+ if (bomg == NULL) {
+ return NULL;
+ }
+ bomg->base.funcs = &bo_gem_funcs;
+ bomg->base.fd = fd;
+ return (struct radeon_bo_manager*)bomg;
+}
+
+void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
+{
+ struct bo_manager_gem *bomg = (struct bo_manager_gem*)bom;
+
+ if (bom == NULL) {
+ return;
+ }
+ free(bomg);
+}
+
+uint32_t
+radeon_gem_name_bo(struct radeon_bo *bo)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
+ return bo_gem->name;
+}
+
+void *
+radeon_gem_get_reloc_in_cs(struct radeon_bo *bo)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
+ return &bo_gem->reloc_in_cs;
+}
+
+int
+radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ struct drm_gem_flink flink;
+ int r;
+
+ if (bo_gem->name) {
+ *name = bo_gem->name;
+ return 0;
+ }
+ flink.handle = bo->handle;
+ r = drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (r) {
+ return r;
+ }
+ bo_gem->name = flink.name;
+ *name = flink.name;
+ return 0;
+}
+
+int
+radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ struct drm_radeon_gem_set_domain args;
+ int r;
+
+ args.handle = bo->handle;
+ args.read_domains = read_domains;
+ args.write_domain = write_domain;
+
+ r = drmCommandWriteRead(boi->bom->fd,
+ DRM_RADEON_GEM_SET_DOMAIN,
+ &args,
+ sizeof(args));
+ return r;
+}
+
+int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
+ int ret;
+
+ ret = drmPrimeHandleToFD(bo_gem->base.bom->fd, bo->handle, DRM_CLOEXEC, handle);
+ return ret;
+}
+
+struct radeon_bo *
+radeon_gem_bo_open_prime(struct radeon_bo_manager *bom, int fd_handle, uint32_t size)
+{
+ struct radeon_bo_gem *bo;
+ int r;
+ uint32_t handle;
+
+ bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
+ if (bo == NULL) {
+ return NULL;
+ }
+
+ bo->base.bom = bom;
+ bo->base.handle = 0;
+ bo->base.size = size;
+ bo->base.alignment = 0;
+ bo->base.domains = RADEON_GEM_DOMAIN_GTT;
+ bo->base.flags = 0;
+ bo->base.ptr = NULL;
+ atomic_set(&bo->reloc_in_cs, 0);
+ bo->map_count = 0;
+
+ r = drmPrimeFDToHandle(bom->fd, fd_handle, &handle);
+ if (r != 0) {
+ free(bo);
+ return NULL;
+ }
+
+ bo->base.handle = handle;
+ bo->name = handle;
+
+ radeon_bo_ref((struct radeon_bo *)bo);
+ return (struct radeon_bo *)bo;
+}
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_bo_gem.h b/chromium/third_party/libdrm/src/radeon/radeon_bo_gem.h
new file mode 100644
index 00000000000..08965f3a49f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_bo_gem.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2008 Dave Airlie
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Dave Airlie
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_BO_GEM_H
+#define RADEON_BO_GEM_H
+
+#include "radeon_bo.h"
+
+struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd);
+void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom);
+
+uint32_t radeon_gem_name_bo(struct radeon_bo *bo);
+void *radeon_gem_get_reloc_in_cs(struct radeon_bo *bo);
+int radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain);
+int radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name);
+int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle);
+struct radeon_bo *radeon_gem_bo_open_prime(struct radeon_bo_manager *bom,
+ int fd_handle,
+ uint32_t size);
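+
+/*
+ * Minimal lifecycle sketch (illustrative only: assumes fd is an open
+ * radeon DRM file descriptor, error handling omitted):
+ *
+ *   struct radeon_bo_manager *bom = radeon_bo_manager_gem_ctor(fd);
+ *   struct radeon_bo *bo = radeon_bo_open(bom, 0, 4096, 4096,
+ *                                         RADEON_GEM_DOMAIN_GTT, 0);
+ *   radeon_bo_map(bo, 1);      ... write through bo->ptr ...
+ *   radeon_bo_unmap(bo);
+ *   radeon_bo_unref(bo);
+ *   radeon_bo_manager_gem_dtor(bom);
+ */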
+#endif
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_bo_int.h b/chromium/third_party/libdrm/src/radeon/radeon_bo_int.h
new file mode 100644
index 00000000000..de981b0af16
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_bo_int.h
@@ -0,0 +1,45 @@
+#ifndef RADEON_BO_INT
+#define RADEON_BO_INT
+
+struct radeon_bo_manager {
+ const struct radeon_bo_funcs *funcs;
+ int fd;
+};
+
+struct radeon_bo_int {
+ void *ptr;
+ uint32_t flags;
+ uint32_t handle;
+ uint32_t size;
+ /* private members */
+ uint32_t alignment;
+ uint32_t domains;
+ unsigned cref;
+ struct radeon_bo_manager *bom;
+ uint32_t space_accounted;
+ uint32_t referenced_in_cs;
+};
+
+/* bo functions */
+struct radeon_bo_funcs {
+ struct radeon_bo *(*bo_open)(struct radeon_bo_manager *bom,
+ uint32_t handle,
+ uint32_t size,
+ uint32_t alignment,
+ uint32_t domains,
+ uint32_t flags);
+ void (*bo_ref)(struct radeon_bo_int *bo);
+ struct radeon_bo *(*bo_unref)(struct radeon_bo_int *bo);
+ int (*bo_map)(struct radeon_bo_int *bo, int write);
+ int (*bo_unmap)(struct radeon_bo_int *bo);
+ int (*bo_wait)(struct radeon_bo_int *bo);
+ int (*bo_is_static)(struct radeon_bo_int *bo);
+ int (*bo_set_tiling)(struct radeon_bo_int *bo, uint32_t tiling_flags,
+ uint32_t pitch);
+ int (*bo_get_tiling)(struct radeon_bo_int *bo, uint32_t *tiling_flags,
+ uint32_t *pitch);
+ int (*bo_is_busy)(struct radeon_bo_int *bo, uint32_t *domain);
+ int (*bo_is_referenced_by_cs)(struct radeon_bo_int *bo, struct radeon_cs *cs);
+};
+
+#endif
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_cs.c b/chromium/third_party/libdrm/src/radeon/radeon_cs.c
new file mode 100644
index 00000000000..dffb869faa1
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_cs.c
@@ -0,0 +1,98 @@
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include "libdrm_macros.h"
+#include <stdio.h>
+#include "radeon_cs.h"
+#include "radeon_cs_int.h"
+
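+/* This file is a thin dispatch layer: each entry point forwards to the
+ * cs manager's function table (see radeon_cs_int.h). */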
+struct radeon_cs *
+radeon_cs_create(struct radeon_cs_manager *csm, uint32_t ndw)
+{
+ struct radeon_cs_int *csi = csm->funcs->cs_create(csm, ndw);
+ return (struct radeon_cs *)csi;
+}
+
+int
+radeon_cs_write_reloc(struct radeon_cs *cs, struct radeon_bo *bo,
+ uint32_t read_domain, uint32_t write_domain,
+ uint32_t flags)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+
+ return csi->csm->funcs->cs_write_reloc(csi,
+ bo,
+ read_domain,
+ write_domain,
+ flags);
+}
+
+int
+radeon_cs_begin(struct radeon_cs *cs, uint32_t ndw,
+ const char *file, const char *func, int line)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_begin(csi, ndw, file, func, line);
+}
+
+int
+radeon_cs_end(struct radeon_cs *cs,
+ const char *file, const char *func, int line)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_end(csi, file, func, line);
+}
+
+int radeon_cs_emit(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_emit(csi);
+}
+
+int radeon_cs_destroy(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_destroy(csi);
+}
+
+int radeon_cs_erase(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_erase(csi);
+}
+
+int radeon_cs_need_flush(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_need_flush(csi);
+}
+
+void radeon_cs_print(struct radeon_cs *cs, FILE *file)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ csi->csm->funcs->cs_print(csi, file);
+}
+
+void
+radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ csi->csm->vram_limit = limit;
+ else
+ csi->csm->gart_limit = limit;
+}
+
+void radeon_cs_space_set_flush(struct radeon_cs *cs,
+ void (*fn)(void *), void *data)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ csi->space_flush_fn = fn;
+ csi->space_flush_data = data;
+}
+
+uint32_t radeon_cs_get_id(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->id;
+}
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_cs.h b/chromium/third_party/libdrm/src/radeon/radeon_cs.h
new file mode 100644
index 00000000000..f68a624eb3f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_cs.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_CS_H
+#define RADEON_CS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_bo.h"
+
+struct radeon_cs_reloc {
+ struct radeon_bo *bo;
+ uint32_t read_domain;
+ uint32_t write_domain;
+ uint32_t flags;
+};
+
+
+#define RADEON_CS_SPACE_OK 0
+#define RADEON_CS_SPACE_OP_TO_BIG 1
+#define RADEON_CS_SPACE_FLUSH 2
+
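+/* Public view of a command stream; it must stay a layout-compatible
+ * prefix of the internal struct radeon_cs_int. */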
+struct radeon_cs {
+ uint32_t *packets;
+ unsigned cdw;
+ unsigned ndw;
+ unsigned section_ndw;
+ unsigned section_cdw;
+};
+
+#define MAX_SPACE_BOS (32)
+
+struct radeon_cs_manager;
+
+extern struct radeon_cs *radeon_cs_create(struct radeon_cs_manager *csm,
+ uint32_t ndw);
+
+extern int radeon_cs_begin(struct radeon_cs *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func, int line);
+extern int radeon_cs_end(struct radeon_cs *cs,
+ const char *file,
+ const char *func,
+ int line);
+extern int radeon_cs_emit(struct radeon_cs *cs);
+extern int radeon_cs_destroy(struct radeon_cs *cs);
+extern int radeon_cs_erase(struct radeon_cs *cs);
+extern int radeon_cs_need_flush(struct radeon_cs *cs);
+extern void radeon_cs_print(struct radeon_cs *cs, FILE *file);
+extern void radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit);
+extern void radeon_cs_space_set_flush(struct radeon_cs *cs, void (*fn)(void *), void *data);
+extern int radeon_cs_write_reloc(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags);
+extern uint32_t radeon_cs_get_id(struct radeon_cs *cs);
+/*
+ * add a persistent BO to the list
+ * a persistent BO is one that will be referenced across flushes,
+ * i.e. colorbuffer, textures etc.
+ * The list is reset when a new "operation" happens, where an operation
+ * is a state emission (color buffer, textures, etc.) followed by a batch of vertices.
+ */
+void radeon_cs_space_add_persistent_bo(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+/* reset the persistent BO list */
+void radeon_cs_space_reset_bos(struct radeon_cs *cs);
+
+/* do a space check with the current persistent BO list */
+int radeon_cs_space_check(struct radeon_cs *cs);
+
+/* do a space check with the current persistent BO list and a temporary BO
+ * a temporary BO is like a DMA buffer, which gets flushed with the
+ * command buffer */
+int radeon_cs_space_check_with_bo(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domains,
+ uint32_t write_domain);
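+
+/*
+ * Typical space-check flow (an illustrative sketch; my_flush_fn,
+ * my_data and cbuf_bo are placeholders, not part of this API):
+ *
+ *   radeon_cs_space_set_flush(cs, my_flush_fn, my_data);
+ *   radeon_cs_space_add_persistent_bo(cs, cbuf_bo, 0,
+ *                                     RADEON_GEM_DOMAIN_VRAM);
+ *   if (radeon_cs_space_check(cs))
+ *       ... operation too big even after a flush, fail ...
+ *   ... emit state and vertices ...
+ *   radeon_cs_space_reset_bos(cs);
+ */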
+
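+/* Note: these inline helpers do no bounds checking; callers must first
+ * reserve space with radeon_cs_begin(). */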
+static inline void radeon_cs_write_dword(struct radeon_cs *cs, uint32_t dword)
+{
+ cs->packets[cs->cdw++] = dword;
+ if (cs->section_ndw) {
+ cs->section_cdw++;
+ }
+}
+
+static inline void radeon_cs_write_qword(struct radeon_cs *cs, uint64_t qword)
+{
+ memcpy(cs->packets + cs->cdw, &qword, sizeof(uint64_t));
+ cs->cdw += 2;
+ if (cs->section_ndw) {
+ cs->section_cdw += 2;
+ }
+}
+
+static inline void radeon_cs_write_table(struct radeon_cs *cs,
+ const void *data, uint32_t size)
+{
+ memcpy(cs->packets + cs->cdw, data, size * 4);
+ cs->cdw += size;
+ if (cs->section_ndw) {
+ cs->section_cdw += size;
+ }
+}
+#endif
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_cs_gem.c b/chromium/third_party/libdrm/src/radeon/radeon_cs_gem.c
new file mode 100644
index 00000000000..23f33af4cab
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_cs_gem.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include "radeon_cs.h"
+#include "radeon_cs_int.h"
+#include "radeon_bo_int.h"
+#include "radeon_cs_gem.h"
+#include "radeon_bo_gem.h"
+#include "drm.h"
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "xf86atomic.h"
+#include "radeon_drm.h"
+
+/* Add LIBDRM_RADEON_BOF_FILES to libdrm_radeon_la_SOURCES when building with BOF_DUMP */
+#define CS_BOF_DUMP 0
+#if CS_BOF_DUMP
+#include "bof.h"
+#endif
+
+struct radeon_cs_manager_gem {
+ struct radeon_cs_manager base;
+ uint32_t device_id;
+ unsigned nbof;
+};
+
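+/* cs_reloc_gem mirrors the relocation record consumed by the kernel's
+ * CS ioctl, hence the explicit packing. */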
+#pragma pack(1)
+struct cs_reloc_gem {
+ uint32_t handle;
+ uint32_t read_domain;
+ uint32_t write_domain;
+ uint32_t flags;
+};
+
+#pragma pack()
+#define RELOC_SIZE (sizeof(struct cs_reloc_gem) / sizeof(uint32_t))
+
+struct cs_gem {
+ struct radeon_cs_int base;
+ struct drm_radeon_cs cs;
+ struct drm_radeon_cs_chunk chunks[2];
+ unsigned nrelocs;
+ uint32_t *relocs;
+ struct radeon_bo_int **relocs_bo;
+};
+
+static pthread_mutex_t id_mutex = PTHREAD_MUTEX_INITIALIZER;
+static uint32_t cs_id_source = 0;
+
+/**
+ * result is undefined if called with ~0
+ */
+static uint32_t get_first_zero(const uint32_t n)
+{
+ /* __builtin_ctz returns number of trailing zeros. */
+ return 1 << __builtin_ctz(~n);
+}
+
+/**
+ * Returns a free id for cs.
+ * If there is no free id we return zero
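+ * (ids are one-hot bit masks drawn from the 32-bit cs_id_source pool,
+ * so at most 32 command streams can hold an id at once)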
+ **/
+static uint32_t generate_id(void)
+{
+ uint32_t r = 0;
+ pthread_mutex_lock( &id_mutex );
+ /* check for free ids */
+ if (cs_id_source != ~r) {
+ /* find first zero bit */
+ r = get_first_zero(cs_id_source);
+
+ /* set id as reserved */
+ cs_id_source |= r;
+ }
+ pthread_mutex_unlock( &id_mutex );
+ return r;
+}
+
+/**
+ * Free the id for later reuse
+ **/
+static void free_id(uint32_t id)
+{
+ pthread_mutex_lock( &id_mutex );
+
+ cs_id_source &= ~id;
+
+ pthread_mutex_unlock( &id_mutex );
+}
+
+static struct radeon_cs_int *cs_gem_create(struct radeon_cs_manager *csm,
+ uint32_t ndw)
+{
+ struct cs_gem *csg;
+
+ /* max cmd buffer size is 64Kb */
+ if (ndw > (64 * 1024 / 4)) {
+ return NULL;
+ }
+ csg = (struct cs_gem*)calloc(1, sizeof(struct cs_gem));
+ if (csg == NULL) {
+ return NULL;
+ }
+ csg->base.csm = csm;
+ csg->base.ndw = 64 * 1024 / 4;
+ csg->base.packets = (uint32_t*)calloc(1, 64 * 1024);
+ if (csg->base.packets == NULL) {
+ free(csg);
+ return NULL;
+ }
+ csg->base.relocs_total_size = 0;
+ csg->base.crelocs = 0;
+ csg->base.id = generate_id();
+ csg->nrelocs = 4096 / (4 * 4) ;
+ csg->relocs_bo = (struct radeon_bo_int**)calloc(1,
+ csg->nrelocs*sizeof(void*));
+ if (csg->relocs_bo == NULL) {
+ free(csg->base.packets);
+ free(csg);
+ return NULL;
+ }
+ csg->base.relocs = csg->relocs = (uint32_t*)calloc(1, 4096);
+ if (csg->relocs == NULL) {
+ free(csg->relocs_bo);
+ free(csg->base.packets);
+ free(csg);
+ return NULL;
+ }
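+ /* chunk 0 carries the command packets (IB), chunk 1 the relocation table */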
+ csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
+ csg->chunks[0].length_dw = 0;
+ csg->chunks[0].chunk_data = (uint64_t)(uintptr_t)csg->base.packets;
+ csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
+ csg->chunks[1].length_dw = 0;
+ csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
+ return (struct radeon_cs_int*)csg;
+}
+
+static int cs_gem_write_reloc(struct radeon_cs_int *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ struct cs_gem *csg = (struct cs_gem*)cs;
+ struct cs_reloc_gem *reloc;
+ uint32_t idx;
+ unsigned i;
+
+ assert(boi->space_accounted);
+
+ /* check domains */
+ if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
+ /* in one CS a bo can only be in the read or the write domain but
+ * not in both at the same time
+ */
+ return -EINVAL;
+ }
+ if (read_domain == RADEON_GEM_DOMAIN_CPU) {
+ return -EINVAL;
+ }
+ if (write_domain == RADEON_GEM_DOMAIN_CPU) {
+ return -EINVAL;
+ }
+ /* use the bit-field hash to determine whether this bo is
+ definitely not referenced by this cs. */
+ if ((atomic_read((atomic_t *)radeon_gem_get_reloc_in_cs(bo)) & cs->id)) {
+ /* check if the bo is already referenced.
+ * Scanning from the end to the beginning saves cycles with mesa,
+ * because it often relocates the same shared dma bo again. */
+ for(i = cs->crelocs; i != 0;) {
+ --i;
+ idx = i * RELOC_SIZE;
+ reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
+ if (reloc->handle == bo->handle) {
+ /* We already checked above that exactly one of the
+ * read or write domains is set for this call, so we
+ * only need to reconcile it with the domains recorded
+ * by the previous relocation of this bo.
+ */
+ /* the DDX expects to read and write from same pixmap */
+ if (write_domain && (reloc->read_domain & write_domain)) {
+ reloc->read_domain = 0;
+ reloc->write_domain = write_domain;
+ } else if (read_domain & reloc->write_domain) {
+ reloc->read_domain = 0;
+ } else {
+ if (write_domain != reloc->write_domain)
+ return -EINVAL;
+ if (read_domain != reloc->read_domain)
+ return -EINVAL;
+ }
+
+ reloc->read_domain |= read_domain;
+ reloc->write_domain |= write_domain;
+ /* update flags */
+ reloc->flags |= (flags & reloc->flags);
+ /* write the relocation packet: header dword plus the reloc table index */
+ radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
+ radeon_cs_write_dword((struct radeon_cs *)cs, idx);
+ return 0;
+ }
+ }
+ }
+ /* new relocation */
+ if (csg->base.crelocs >= csg->nrelocs) {
+ /* allocate more memory (TODO: should perhaps use a slab allocator) */
+ uint32_t *tmp, size;
+ size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
+ tmp = (uint32_t*)realloc(csg->relocs_bo, size);
+ if (tmp == NULL) {
+ return -ENOMEM;
+ }
+ csg->relocs_bo = (struct radeon_bo_int **)tmp;
+ size = ((csg->nrelocs + 1) * RELOC_SIZE * 4);
+ tmp = (uint32_t*)realloc(csg->relocs, size);
+ if (tmp == NULL) {
+ return -ENOMEM;
+ }
+ cs->relocs = csg->relocs = tmp;
+ csg->nrelocs += 1;
+ csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
+ }
+ csg->relocs_bo[csg->base.crelocs] = boi;
+ idx = (csg->base.crelocs++) * RELOC_SIZE;
+ reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
+ reloc->handle = bo->handle;
+ reloc->read_domain = read_domain;
+ reloc->write_domain = write_domain;
+ reloc->flags = flags;
+ csg->chunks[1].length_dw += RELOC_SIZE;
+ radeon_bo_ref(bo);
+ /* the bo might be referenced from another context so we have to use atomic operations */
+ atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id);
+ cs->relocs_total_size += boi->size;
+ radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
+ radeon_cs_write_dword((struct radeon_cs *)cs, idx);
+ return 0;
+}
+
+static int cs_gem_begin(struct radeon_cs_int *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func,
+ int line)
+{
+
+ if (cs->section_ndw) {
+ fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
+ cs->section_file, cs->section_func, cs->section_line);
+ fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
+ file, func, line);
+ return -EPIPE;
+ }
+ cs->section_ndw = ndw;
+ cs->section_cdw = 0;
+ cs->section_file = file;
+ cs->section_func = func;
+ cs->section_line = line;
+
+ if (cs->cdw + ndw > cs->ndw) {
+ uint32_t tmp, *ptr;
+
+ /* round up the required size to a multiple of 1024 */
+ tmp = (cs->cdw + ndw + 0x3FF) & (~0x3FF);
+ ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
+ if (ptr == NULL) {
+ return -ENOMEM;
+ }
+ cs->packets = ptr;
+ cs->ndw = tmp;
+ }
+ return 0;
+}
+
+static int cs_gem_end(struct radeon_cs_int *cs,
+ const char *file,
+ const char *func,
+ int line)
+{
+ if (!cs->section_ndw) {
+ fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
+ file, func, line);
+ return -EPIPE;
+ }
+ if (cs->section_ndw != cs->section_cdw) {
+ fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
+ cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
+ fprintf(stderr, "CS section end at (%s,%s,%d)\n",
+ file, func, line);
+
+ /* We must reset the section even when there is an error. */
+ cs->section_ndw = 0;
+ return -EPIPE;
+ }
+ cs->section_ndw = 0;
+ return 0;
+}
+
+#if CS_BOF_DUMP
+static void cs_gem_dump_bof(struct radeon_cs_int *cs)
+{
+ struct cs_gem *csg = (struct cs_gem*)cs;
+ struct radeon_cs_manager_gem *csm;
+ bof_t *bcs, *blob, *array, *bo, *size, *handle, *device_id, *root;
+ char tmp[256];
+ unsigned i;
+
+ csm = (struct radeon_cs_manager_gem *)cs->csm;
+ root = device_id = bcs = blob = array = bo = size = handle = NULL;
+ root = bof_object();
+ if (root == NULL)
+ goto out_err;
+ device_id = bof_int32(csm->device_id);
+ if (device_id == NULL)
+ goto out_err;
+ if (bof_object_set(root, "device_id", device_id))
+ goto out_err;
+ bof_decref(device_id);
+ device_id = NULL;
+ /* dump relocs */
+ blob = bof_blob(csg->nrelocs * 16, csg->relocs);
+ if (blob == NULL)
+ goto out_err;
+ if (bof_object_set(root, "reloc", blob))
+ goto out_err;
+ bof_decref(blob);
+ blob = NULL;
+ /* dump cs */
+ blob = bof_blob(cs->cdw * 4, cs->packets);
+ if (blob == NULL)
+ goto out_err;
+ if (bof_object_set(root, "pm4", blob))
+ goto out_err;
+ bof_decref(blob);
+ blob = NULL;
+ /* dump bo */
+ array = bof_array();
+ if (array == NULL)
+ goto out_err;
+ for (i = 0; i < csg->base.crelocs; i++) {
+ bo = bof_object();
+ if (bo == NULL)
+ goto out_err;
+ size = bof_int32(csg->relocs_bo[i]->size);
+ if (size == NULL)
+ goto out_err;
+ if (bof_object_set(bo, "size", size))
+ goto out_err;
+ bof_decref(size);
+ size = NULL;
+ handle = bof_int32(csg->relocs_bo[i]->handle);
+ if (handle == NULL)
+ goto out_err;
+ if (bof_object_set(bo, "handle", handle))
+ goto out_err;
+ bof_decref(handle);
+ handle = NULL;
+ radeon_bo_map((struct radeon_bo*)csg->relocs_bo[i], 0);
+ blob = bof_blob(csg->relocs_bo[i]->size, csg->relocs_bo[i]->ptr);
+ radeon_bo_unmap((struct radeon_bo*)csg->relocs_bo[i]);
+ if (blob == NULL)
+ goto out_err;
+ if (bof_object_set(bo, "data", blob))
+ goto out_err;
+ bof_decref(blob);
+ blob = NULL;
+ if (bof_array_append(array, bo))
+ goto out_err;
+ bof_decref(bo);
+ bo = NULL;
+ }
+ if (bof_object_set(root, "bo", array))
+ goto out_err;
+ sprintf(tmp, "d-0x%04X-%08d.bof", csm->device_id, csm->nbof++);
+ bof_dump_file(root, tmp);
+out_err:
+ bof_decref(blob);
+ bof_decref(array);
+ bof_decref(bo);
+ bof_decref(size);
+ bof_decref(handle);
+ bof_decref(device_id);
+ bof_decref(root);
+}
+#endif
+
+static int cs_gem_emit(struct radeon_cs_int *cs)
+{
+ struct cs_gem *csg = (struct cs_gem*)cs;
+ uint64_t chunk_array[2];
+ unsigned i;
+ int r;
+
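+ /* pad the IB to a multiple of 8 dwords with type-2 NOP packets */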
+ while (cs->cdw & 7)
+ radeon_cs_write_dword((struct radeon_cs *)cs, 0x80000000);
+
+#if CS_BOF_DUMP
+ cs_gem_dump_bof(cs);
+#endif
+ csg->chunks[0].length_dw = cs->cdw;
+
+ chunk_array[0] = (uint64_t)(uintptr_t)&csg->chunks[0];
+ chunk_array[1] = (uint64_t)(uintptr_t)&csg->chunks[1];
+
+ csg->cs.num_chunks = 2;
+ csg->cs.chunks = (uint64_t)(uintptr_t)chunk_array;
+
+ r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS,
+ &csg->cs, sizeof(struct drm_radeon_cs));
+ for (i = 0; i < csg->base.crelocs; i++) {
+ csg->relocs_bo[i]->space_accounted = 0;
+ /* the bo might be referenced from another context so we have to use atomic operations */
+ atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
+ radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
+ csg->relocs_bo[i] = NULL;
+ }
+
+ cs->csm->read_used = 0;
+ cs->csm->vram_write_used = 0;
+ cs->csm->gart_write_used = 0;
+ return r;
+}
+
+static int cs_gem_destroy(struct radeon_cs_int *cs)
+{
+ struct cs_gem *csg = (struct cs_gem*)cs;
+
+ free_id(cs->id);
+ free(csg->relocs_bo);
+ free(cs->relocs);
+ free(cs->packets);
+ free(cs);
+ return 0;
+}
+
+static int cs_gem_erase(struct radeon_cs_int *cs)
+{
+ struct cs_gem *csg = (struct cs_gem*)cs;
+ unsigned i;
+
+ if (csg->relocs_bo) {
+ for (i = 0; i < csg->base.crelocs; i++) {
+ if (csg->relocs_bo[i]) {
+ /* the bo might be referenced from another context so we have to use atomic operations */
+ atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
+ radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
+ csg->relocs_bo[i] = NULL;
+ }
+ }
+ }
+ cs->relocs_total_size = 0;
+ cs->cdw = 0;
+ cs->section_ndw = 0;
+ cs->crelocs = 0;
+ csg->chunks[0].length_dw = 0;
+ csg->chunks[1].length_dw = 0;
+ return 0;
+}
+
+static int cs_gem_need_flush(struct radeon_cs_int *cs)
+{
+ return 0; /* the old flush heuristic (relocs_total_size > 32 MiB) is disabled */
+}
+
+static void cs_gem_print(struct radeon_cs_int *cs, FILE *file)
+{
+ struct radeon_cs_manager_gem *csm;
+ unsigned int i;
+
+ csm = (struct radeon_cs_manager_gem *)cs->csm;
+ fprintf(file, "VENDORID:DEVICEID 0x%04X:0x%04X\n", 0x1002, csm->device_id);
+ for (i = 0; i < cs->cdw; i++) {
+ fprintf(file, "0x%08X\n", cs->packets[i]);
+ }
+}
+
+static const struct radeon_cs_funcs radeon_cs_gem_funcs = {
+ .cs_create = cs_gem_create,
+ .cs_write_reloc = cs_gem_write_reloc,
+ .cs_begin = cs_gem_begin,
+ .cs_end = cs_gem_end,
+ .cs_emit = cs_gem_emit,
+ .cs_destroy = cs_gem_destroy,
+ .cs_erase = cs_gem_erase,
+ .cs_need_flush = cs_gem_need_flush,
+ .cs_print = cs_gem_print,
+};
+
+static int radeon_get_device_id(int fd, uint32_t *device_id)
+{
+ struct drm_radeon_info info = {};
+ int r;
+
+ *device_id = 0;
+ info.request = RADEON_INFO_DEVICE_ID;
+ info.value = (uintptr_t)device_id;
+ r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
+ sizeof(struct drm_radeon_info));
+ return r;
+}
+
+struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
+{
+ struct radeon_cs_manager_gem *csm;
+
+ csm = calloc(1, sizeof(struct radeon_cs_manager_gem));
+ if (csm == NULL) {
+ return NULL;
+ }
+ csm->base.funcs = &radeon_cs_gem_funcs;
+ csm->base.fd = fd;
+ radeon_get_device_id(fd, &csm->device_id);
+ return &csm->base;
+}
+
+void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
+{
+ free(csm);
+}
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_cs_gem.h b/chromium/third_party/libdrm/src/radeon/radeon_cs_gem.h
new file mode 100644
index 00000000000..5dea38ad241
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_cs_gem.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_CS_GEM_H
+#define RADEON_CS_GEM_H
+
+#include "radeon_cs.h"
+
+struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd);
+void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm);
+
+#endif
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_cs_int.h b/chromium/third_party/libdrm/src/radeon/radeon_cs_int.h
new file mode 100644
index 00000000000..d906ad438ea
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_cs_int.h
@@ -0,0 +1,67 @@
+
+#ifndef _RADEON_CS_INT_H_
+#define _RADEON_CS_INT_H_
+
+struct radeon_cs_space_check {
+ struct radeon_bo_int *bo;
+ uint32_t read_domains;
+ uint32_t write_domain;
+ uint32_t new_accounted;
+};
+
+struct radeon_cs_int {
+ /* keep first two in same place */
+ uint32_t *packets;
+ unsigned cdw;
+ unsigned ndw;
+ unsigned section_ndw;
+ unsigned section_cdw;
+ /* private members */
+ struct radeon_cs_manager *csm;
+ void *relocs;
+ unsigned crelocs;
+ unsigned relocs_total_size;
+ const char *section_file;
+ const char *section_func;
+ int section_line;
+ struct radeon_cs_space_check bos[MAX_SPACE_BOS];
+ int bo_count;
+ void (*space_flush_fn)(void *);
+ void *space_flush_data;
+ uint32_t id;
+};
+
+/* cs functions */
+struct radeon_cs_funcs {
+ struct radeon_cs_int *(*cs_create)(struct radeon_cs_manager *csm,
+ uint32_t ndw);
+ int (*cs_write_reloc)(struct radeon_cs_int *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags);
+ int (*cs_begin)(struct radeon_cs_int *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func,
+ int line);
+ int (*cs_end)(struct radeon_cs_int *cs,
+ const char *file, const char *func,
+ int line);
+
+
+ int (*cs_emit)(struct radeon_cs_int *cs);
+ int (*cs_destroy)(struct radeon_cs_int *cs);
+ int (*cs_erase)(struct radeon_cs_int *cs);
+ int (*cs_need_flush)(struct radeon_cs_int *cs);
+ void (*cs_print)(struct radeon_cs_int *cs, FILE *file);
+};
+
+struct radeon_cs_manager {
+ const struct radeon_cs_funcs *funcs;
+ int fd;
+ int32_t vram_limit, gart_limit;
+ int32_t vram_write_used, gart_write_used;
+ int32_t read_used;
+};
+#endif
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_cs_space.c b/chromium/third_party/libdrm/src/radeon/radeon_cs_space.c
new file mode 100644
index 00000000000..69287be5bc8
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_cs_space.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright © 2009 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include "libdrm_macros.h"
+#include "radeon_cs.h"
+#include "radeon_bo_int.h"
+#include "radeon_cs_int.h"
+
+struct rad_sizes {
+ int32_t op_read;
+ int32_t op_gart_write;
+ int32_t op_vram_write;
+};
+
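+/*
+ * bo->space_accounted encodes the domains a bo has already been charged
+ * against: read domains in the high 16 bits, the write domain in the
+ * low 16 bits (see radeon_bo_get_src_domain()).
+ */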
+static inline int radeon_cs_setup_bo(struct radeon_cs_space_check *sc, struct rad_sizes *sizes)
+{
+ uint32_t read_domains, write_domain;
+ struct radeon_bo_int *bo;
+
+ bo = sc->bo;
+ sc->new_accounted = 0;
+ read_domains = sc->read_domains;
+ write_domain = sc->write_domain;
+
+ /* legacy needs a static check */
+ if (radeon_bo_is_static((struct radeon_bo *)sc->bo)) {
+ bo->space_accounted = sc->new_accounted = (read_domains << 16) | write_domain;
+ return 0;
+ }
+
+ /* already accounted this bo */
+ if (write_domain && (write_domain == bo->space_accounted)) {
+ sc->new_accounted = bo->space_accounted;
+ return 0;
+ }
+ if (read_domains && ((read_domains << 16) == bo->space_accounted)) {
+ sc->new_accounted = bo->space_accounted;
+ return 0;
+ }
+
+ if (bo->space_accounted == 0) {
+ if (write_domain) {
+ if (write_domain == RADEON_GEM_DOMAIN_VRAM)
+ sizes->op_vram_write += bo->size;
+ else if (write_domain == RADEON_GEM_DOMAIN_GTT)
+ sizes->op_gart_write += bo->size;
+ sc->new_accounted = write_domain;
+ } else {
+ sizes->op_read += bo->size;
+ sc->new_accounted = read_domains << 16;
+ }
+ } else {
+ uint16_t old_read, old_write;
+
+ old_read = bo->space_accounted >> 16;
+ old_write = bo->space_accounted & 0xffff;
+
+ if (write_domain && (old_read & write_domain)) {
+ sc->new_accounted = write_domain;
+ /* moving from read to a write domain */
+ if (write_domain == RADEON_GEM_DOMAIN_VRAM) {
+ sizes->op_read -= bo->size;
+ sizes->op_vram_write += bo->size;
+ } else if (write_domain == RADEON_GEM_DOMAIN_GTT) {
+ sizes->op_read -= bo->size;
+ sizes->op_gart_write += bo->size;
+ }
+ } else if (read_domains & old_write) {
+ sc->new_accounted = bo->space_accounted & 0xffff;
+ } else {
+ /* rewrite the domains */
+ if (write_domain != old_write)
+ fprintf(stderr,"WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, write_domain, old_write);
+ if (read_domains != old_read)
+ fprintf(stderr,"READ DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, read_domains, old_read);
+ return RADEON_CS_SPACE_FLUSH;
+ }
+ }
+ return 0;
+}
+
+static int radeon_cs_do_space_check(struct radeon_cs_int *cs, struct radeon_cs_space_check *new_tmp)
+{
+ struct radeon_cs_manager *csm = cs->csm;
+ int i;
+ struct radeon_bo_int *bo;
+ struct rad_sizes sizes;
+ int ret;
+
+ /* check the totals for this operation */
+
+ if (cs->bo_count == 0 && !new_tmp)
+ return 0;
+
+ memset(&sizes, 0, sizeof(struct rad_sizes));
+
+ /* prepare */
+ for (i = 0; i < cs->bo_count; i++) {
+ ret = radeon_cs_setup_bo(&cs->bos[i], &sizes);
+ if (ret)
+ return ret;
+ }
+
+ if (new_tmp) {
+ ret = radeon_cs_setup_bo(new_tmp, &sizes);
+ if (ret)
+ return ret;
+ }
+
+ if (sizes.op_read < 0)
+ sizes.op_read = 0;
+
+ /* check sizes - operation first */
+ if ((sizes.op_read + sizes.op_gart_write > csm->gart_limit) ||
+ (sizes.op_vram_write > csm->vram_limit)) {
+ return RADEON_CS_SPACE_OP_TO_BIG;
+ }
+
+ if (((csm->vram_write_used + sizes.op_vram_write) > csm->vram_limit) ||
+ ((csm->read_used + csm->gart_write_used + sizes.op_gart_write + sizes.op_read) > csm->gart_limit)) {
+ return RADEON_CS_SPACE_FLUSH;
+ }
+
+ csm->gart_write_used += sizes.op_gart_write;
+ csm->vram_write_used += sizes.op_vram_write;
+ csm->read_used += sizes.op_read;
+ /* commit */
+ for (i = 0; i < cs->bo_count; i++) {
+ bo = cs->bos[i].bo;
+ bo->space_accounted = cs->bos[i].new_accounted;
+ }
+ if (new_tmp)
+ new_tmp->bo->space_accounted = new_tmp->new_accounted;
+
+ return RADEON_CS_SPACE_OK;
+}
+
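+/* Illustrative sketch (hypothetical byte limits): if gart_limit = 512
+ * and an operation's op_read + op_gart_write = 600, the per-operation
+ * check fails with RADEON_CS_SPACE_OP_TO_BIG; if the operation only
+ * exceeds a limit when added to the already-used totals, the caller
+ * gets RADEON_CS_SPACE_FLUSH and may flush and retry.
+ */
+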
+void
+radeon_cs_space_add_persistent_bo(struct radeon_cs *cs, struct radeon_bo *bo,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ int i;
+ for (i = 0; i < csi->bo_count; i++) {
+ if (csi->bos[i].bo == boi &&
+ csi->bos[i].read_domains == read_domains &&
+ csi->bos[i].write_domain == write_domain)
+ return;
+ }
+ radeon_bo_ref(bo);
+ i = csi->bo_count;
+ csi->bos[i].bo = boi;
+ csi->bos[i].read_domains = read_domains;
+ csi->bos[i].write_domain = write_domain;
+ csi->bos[i].new_accounted = 0;
+ csi->bo_count++;
+
+ assert(csi->bo_count < MAX_SPACE_BOS);
+}
+
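+/* Illustrative usage sketch (not a verbatim caller): pin a buffer for
+ * the lifetime of the CS, then verify the running totals still fit:
+ *
+ *   radeon_cs_space_add_persistent_bo(cs, bo, RADEON_GEM_DOMAIN_GTT, 0);
+ *   if (radeon_cs_space_check(cs) != 0) {
+ *       // operation too big, or a flush did not free enough space
+ *   }
+ */
+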
+static int radeon_cs_check_space_internal(struct radeon_cs_int *cs,
+ struct radeon_cs_space_check *tmp_bo)
+{
+ int ret;
+ int flushed = 0;
+
+again:
+ ret = radeon_cs_do_space_check(cs, tmp_bo);
+ if (ret == RADEON_CS_SPACE_OP_TO_BIG)
+ return -1;
+ if (ret == RADEON_CS_SPACE_FLUSH) {
+ (*cs->space_flush_fn)(cs->space_flush_data);
+ if (flushed)
+ return -1;
+ flushed = 1;
+ goto again;
+ }
+ return 0;
+}
+
+int
+radeon_cs_space_check_with_bo(struct radeon_cs *cs, struct radeon_bo *bo,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ struct radeon_cs_space_check temp_bo;
+
+ int ret = 0;
+
+ if (bo) {
+ temp_bo.bo = boi;
+ temp_bo.read_domains = read_domains;
+ temp_bo.write_domain = write_domain;
+ temp_bo.new_accounted = 0;
+ }
+
+ ret = radeon_cs_check_space_internal(csi, bo ? &temp_bo : NULL);
+ return ret;
+}
+
+int radeon_cs_space_check(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return radeon_cs_check_space_internal(csi, NULL);
+}
+
+void radeon_cs_space_reset_bos(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ int i;
+ for (i = 0; i < csi->bo_count; i++) {
+ radeon_bo_unref((struct radeon_bo *)csi->bos[i].bo);
+ csi->bos[i].bo = NULL;
+ csi->bos[i].read_domains = 0;
+ csi->bos[i].write_domain = 0;
+ csi->bos[i].new_accounted = 0;
+ }
+ csi->bo_count = 0;
+}
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_surface.c b/chromium/third_party/libdrm/src/radeon/radeon_surface.c
new file mode 100644
index 00000000000..1424660daae
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_surface.c
@@ -0,0 +1,2560 @@
+/*
+ * Copyright © 2011 Red Hat All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <jglisse@redhat.com>
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include <stdbool.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include "drm.h"
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "radeon_drm.h"
+#include "radeon_surface.h"
+
+#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))
+#define MAX2(A, B) ((A) > (B) ? (A) : (B))
+#define MIN2(A, B) ((A) < (B) ? (A) : (B))
+
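+/* Illustrative note: ALIGN rounds up to a power-of-two boundary and is
+ * only valid for power-of-two alignments, e.g.:
+ *
+ *   ALIGN(100, 64)  ->  (100 + 63) & ~63  ->  128
+ *   ALIGN(128, 64)  ->  (128 + 63) & ~63  ->  128
+ */
+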
+/* keep this private */
+enum radeon_family {
+ CHIP_UNKNOWN,
+ CHIP_R600,
+ CHIP_RV610,
+ CHIP_RV630,
+ CHIP_RV670,
+ CHIP_RV620,
+ CHIP_RV635,
+ CHIP_RS780,
+ CHIP_RS880,
+ CHIP_RV770,
+ CHIP_RV730,
+ CHIP_RV710,
+ CHIP_RV740,
+ CHIP_CEDAR,
+ CHIP_REDWOOD,
+ CHIP_JUNIPER,
+ CHIP_CYPRESS,
+ CHIP_HEMLOCK,
+ CHIP_PALM,
+ CHIP_SUMO,
+ CHIP_SUMO2,
+ CHIP_BARTS,
+ CHIP_TURKS,
+ CHIP_CAICOS,
+ CHIP_CAYMAN,
+ CHIP_ARUBA,
+ CHIP_TAHITI,
+ CHIP_PITCAIRN,
+ CHIP_VERDE,
+ CHIP_OLAND,
+ CHIP_HAINAN,
+ CHIP_BONAIRE,
+ CHIP_KAVERI,
+ CHIP_KABINI,
+ CHIP_HAWAII,
+ CHIP_MULLINS,
+ CHIP_LAST,
+};
+
+typedef int (*hw_init_surface_t)(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+typedef int (*hw_best_surface_t)(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+
+struct radeon_hw_info {
+ /* apply to r6, eg */
+ uint32_t group_bytes;
+ uint32_t num_banks;
+ uint32_t num_pipes;
+ /* apply to eg */
+ uint32_t row_size;
+ unsigned allow_2d;
+ /* apply to si */
+ uint32_t tile_mode_array[32];
+ /* apply to cik */
+ uint32_t macrotile_mode_array[16];
+};
+
+struct radeon_surface_manager {
+ int fd;
+ uint32_t device_id;
+ struct radeon_hw_info hw_info;
+ unsigned family;
+ hw_init_surface_t surface_init;
+ hw_best_surface_t surface_best;
+};
+
+/* helper */
+static int radeon_get_value(int fd, unsigned req, uint32_t *value)
+{
+ struct drm_radeon_info info = {};
+ int r;
+
+ *value = 0;
+ info.request = req;
+ info.value = (uintptr_t)value;
+ r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
+ sizeof(struct drm_radeon_info));
+ return r;
+}
+
+static int radeon_get_family(struct radeon_surface_manager *surf_man)
+{
+ switch (surf_man->device_id) {
+#define CHIPSET(pci_id, name, fam) case pci_id: surf_man->family = CHIP_##fam; break;
+#include "r600_pci_ids.h"
+#undef CHIPSET
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static unsigned next_power_of_two(unsigned x)
+{
+ if (x <= 1)
+ return 1;
+
+ return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
+}
+
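+/* Illustrative note: next_power_of_two rounds up, e.g. 1 -> 1, 5 -> 8,
+ * 8 -> 8, 17 -> 32; the __builtin_clz form derives the result's
+ * exponent from the leading zeros of (x - 1).
+ */
+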
+static unsigned mip_minify(unsigned size, unsigned level)
+{
+ unsigned val;
+
+ val = MAX2(1, size >> level);
+ if (level > 0)
+ val = next_power_of_two(val);
+ return val;
+}
+
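+/* Illustrative note: mip_minify halves the dimension per level and pads
+ * non-base levels to a power of two, e.g. for size = 100:
+ *
+ *   mip_minify(100, 0) -> 100   (base level kept as-is)
+ *   mip_minify(100, 1) -> 64    (50 rounded up)
+ *   mip_minify(100, 4) -> 8     (6 rounded up)
+ */
+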
+static void surf_minify(struct radeon_surface *surf,
+ struct radeon_surface_level *surflevel,
+ unsigned bpe, unsigned level,
+ uint32_t xalign, uint32_t yalign, uint32_t zalign,
+ uint64_t offset)
+{
+ surflevel->npix_x = mip_minify(surf->npix_x, level);
+ surflevel->npix_y = mip_minify(surf->npix_y, level);
+ surflevel->npix_z = mip_minify(surf->npix_z, level);
+ surflevel->nblk_x = (surflevel->npix_x + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (surflevel->npix_y + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (surflevel->npix_z + surf->blk_d - 1) / surf->blk_d;
+ if (surf->nsamples == 1 && surflevel->mode == RADEON_SURF_MODE_2D &&
+ !(surf->flags & RADEON_SURF_FMASK)) {
+ if (surflevel->nblk_x < xalign || surflevel->nblk_y < yalign) {
+ surflevel->mode = RADEON_SURF_MODE_1D;
+ return;
+ }
+ }
+ surflevel->nblk_x = ALIGN(surflevel->nblk_x, xalign);
+ surflevel->nblk_y = ALIGN(surflevel->nblk_y, yalign);
+ surflevel->nblk_z = ALIGN(surflevel->nblk_z, zalign);
+
+ surflevel->offset = offset;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
+ surflevel->slice_size = (uint64_t)surflevel->pitch_bytes * surflevel->nblk_y;
+
+ surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
+}
+
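+/* Illustrative sketch (hypothetical values): a level with
+ * nblk_x = nblk_y = 256, bpe = 4 and nsamples = 1 ends up with:
+ *
+ *   pitch_bytes = 256 * 4 * 1;            // 1024
+ *   slice_size  = (uint64_t)1024 * 256;   // 262144
+ */
+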
+/* ===========================================================================
+ * r600/r700 family
+ */
+static int r6_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 14) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+ drmFreeVersion(version);
+
+ switch ((tiling_config & 0xe) >> 1) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ surf_man->hw_info.num_pipes = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0x30) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ default:
+ surf_man->hw_info.num_banks = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xc0) >> 6) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ surf_man->hw_info.group_bytes = 256;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+ return 0;
+}
+
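+/* Illustrative decode (hypothetical tiling_config of 0x14):
+ *
+ *   (0x14 & 0x0e) >> 1 == 2  ->  num_pipes   = 4
+ *   (0x14 & 0x30) >> 4 == 1  ->  num_banks   = 8
+ *   (0x14 & 0xc0) >> 6 == 0  ->  group_bytes = 256
+ */
+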
+static int r6_surface_init_linear(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign;
+ unsigned i;
+
+ /* compute alignment */
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+ /* the 32 alignment is for scanout, cb or db; to allow a texture to be
+ * easily bound as such, we force this alignment on all surfaces
+ */
+ xalign = MAX2(1, surf_man->hw_info.group_bytes / surf->bpe);
+ yalign = 1;
+ zalign = 1;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((surf->bpe == 1) ? 64 : 32, xalign);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_LINEAR;
+ surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init_linear_aligned(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign;
+ unsigned i;
+
+ /* compute alignment */
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+ xalign = MAX2(64, surf_man->hw_info.group_bytes / surf->bpe);
+ yalign = 1;
+ zalign = 1;
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
+ surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init_1d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, tilew;
+ unsigned i;
+
+ /* compute alignment */
+ tilew = 8;
+ xalign = surf_man->hw_info.group_bytes / (tilew * surf->bpe * surf->nsamples);
+ xalign = MAX2(tilew, xalign);
+ yalign = tilew;
+ zalign = 1;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((surf->bpe == 1) ? 64 : 32, xalign);
+ }
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_1D;
+ surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, tilew;
+ unsigned i;
+
+ /* compute alignment */
+ tilew = 8;
+ zalign = 1;
+ xalign = (surf_man->hw_info.group_bytes * surf_man->hw_info.num_banks) /
+ (tilew * surf->bpe * surf->nsamples);
+ xalign = MAX2(tilew * surf_man->hw_info.num_banks, xalign);
+ if (surf->flags & RADEON_SURF_FMASK)
+ xalign = MAX2(128, xalign);
+ yalign = tilew * surf_man->hw_info.num_pipes;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((surf->bpe == 1) ? 64 : 32, xalign);
+ }
+ if (!start_level) {
+ surf->bo_alignment =
+ MAX2(surf_man->hw_info.num_pipes *
+ surf_man->hw_info.num_banks *
+ surf->nsamples * surf->bpe * 64,
+ xalign * yalign * surf->nsamples * surf->bpe);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_2D;
+ surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
+ if (surf->level[i].mode == RADEON_SURF_MODE_1D) {
+ return r6_surface_init_1d(surf_man, surf, offset, i);
+ }
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
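+/* Illustrative sketch (hypothetical values): with group_bytes = 256,
+ * num_banks = 8, num_pipes = 2 and a 32-bit single-sample surface
+ * (bpe = 4, nsamples = 1, not FMASK), the 2D alignments become:
+ *
+ *   xalign = MAX2(8 * 8, (256 * 8) / (8 * 4 * 1)) = 64;
+ *   yalign = 8 * 2 = 16;
+ */
+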
+static int r6_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode;
+ int r;
+
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ /* zbuffers only support 1D or 2D tiled surfaces */
+ switch (mode) {
+ case RADEON_SURF_MODE_1D:
+ case RADEON_SURF_MODE_2D:
+ break;
+ default:
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ break;
+ }
+ }
+
+ /* force 1D on kernels that can't do 2D */
+ if (!surf_man->hw_info.allow_2d && mode > RADEON_SURF_MODE_1D) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 2D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ /* check surface dimension */
+ if (surf->npix_x > 8192 || surf->npix_y > 8192 || surf->npix_z > 8192) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 14) {
+ return -EINVAL;
+ }
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = r6_surface_init_linear_aligned(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = r6_surface_init_1d(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = r6_surface_init_2d(surf_man, surf, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+static int r6_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ /* no value to optimize for r6xx/r7xx */
+ return 0;
+}
+
+
+/* ===========================================================================
+ * evergreen family
+ */
+static int eg_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 16) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+ drmFreeVersion(version);
+
+ switch (tiling_config & 0xf) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ surf_man->hw_info.num_pipes = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf0) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ case 2:
+ surf_man->hw_info.num_banks = 16;
+ break;
+ default:
+ surf_man->hw_info.num_banks = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf00) >> 8) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ surf_man->hw_info.group_bytes = 256;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf000) >> 12) {
+ case 0:
+ surf_man->hw_info.row_size = 1024;
+ break;
+ case 1:
+ surf_man->hw_info.row_size = 2048;
+ break;
+ case 2:
+ surf_man->hw_info.row_size = 4096;
+ break;
+ default:
+ surf_man->hw_info.row_size = 4096;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+ return 0;
+}
+
+static void eg_surf_minify(struct radeon_surface *surf,
+ struct radeon_surface_level *surflevel,
+ unsigned bpe,
+ unsigned level,
+ unsigned slice_pt,
+ unsigned mtilew,
+ unsigned mtileh,
+ unsigned mtileb,
+ uint64_t offset)
+{
+ unsigned mtile_pr, mtile_ps;
+
+ surflevel->npix_x = mip_minify(surf->npix_x, level);
+ surflevel->npix_y = mip_minify(surf->npix_y, level);
+ surflevel->npix_z = mip_minify(surf->npix_z, level);
+ surflevel->nblk_x = (surflevel->npix_x + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (surflevel->npix_y + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (surflevel->npix_z + surf->blk_d - 1) / surf->blk_d;
+ if (surf->nsamples == 1 && surflevel->mode == RADEON_SURF_MODE_2D &&
+ !(surf->flags & RADEON_SURF_FMASK)) {
+ if (surflevel->nblk_x < mtilew || surflevel->nblk_y < mtileh) {
+ surflevel->mode = RADEON_SURF_MODE_1D;
+ return;
+ }
+ }
+ surflevel->nblk_x = ALIGN(surflevel->nblk_x, mtilew);
+ surflevel->nblk_y = ALIGN(surflevel->nblk_y, mtileh);
+ surflevel->nblk_z = ALIGN(surflevel->nblk_z, 1);
+
+ /* macro tile per row */
+ mtile_pr = surflevel->nblk_x / mtilew;
+ /* macro tile per slice */
+ mtile_ps = (mtile_pr * surflevel->nblk_y) / mtileh;
+
+ surflevel->offset = offset;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
+ surflevel->slice_size = (uint64_t)mtile_ps * mtileb * slice_pt;
+
+ surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
+}
+
+static int eg_surface_init_1d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, tilew;
+ unsigned i;
+
+ /* compute alignment */
+ tilew = 8;
+ xalign = surf_man->hw_info.group_bytes / (tilew * bpe * surf->nsamples);
+ xalign = MAX2(tilew, xalign);
+ yalign = tilew;
+ zalign = 1;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((bpe == 1) ? 64 : 32, xalign);
+ }
+
+ if (!start_level) {
+ unsigned alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (offset) {
+ offset = ALIGN(offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_1D;
+ surf_minify(surf, level+i, bpe, i, xalign, yalign, zalign, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int eg_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe, unsigned tile_split,
+ uint64_t offset, unsigned start_level)
+{
+ unsigned tilew, tileh, tileb;
+ unsigned mtilew, mtileh, mtileb;
+ unsigned slice_pt;
+ unsigned i;
+
+ /* compute tile values */
+ tilew = 8;
+ tileh = 8;
+ tileb = tilew * tileh * bpe * surf->nsamples;
+ /* slices per tile */
+ slice_pt = 1;
+ if (tileb > tile_split && tile_split) {
+ slice_pt = tileb / tile_split;
+ }
+ tileb = tileb / slice_pt;
+
+ /* macro tile width & height */
+ mtilew = (tilew * surf->bankw * surf_man->hw_info.num_pipes) * surf->mtilea;
+ mtileh = (tileh * surf->bankh * surf_man->hw_info.num_banks) / surf->mtilea;
+ /* macro tile bytes */
+ mtileb = (mtilew / tilew) * (mtileh / tileh) * tileb;
+
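+ /* Illustrative sketch (hypothetical values): bpe = 4, nsamples = 1,
+ * tile_split = 256, bankw = bankh = mtilea = 1, num_pipes = 2,
+ * num_banks = 8 gives:
+ *
+ *   tileb  = 8 * 8 * 4 = 256;   // not > tile_split, so slice_pt = 1
+ *   mtilew = (8 * 1 * 2) * 1 = 16;
+ *   mtileh = (8 * 1 * 8) / 1 = 64;
+ *   mtileb = (16 / 8) * (64 / 8) * 256 = 4096;
+ */
+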
+ if (!start_level) {
+ unsigned alignment = MAX2(256, mtileb);
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (offset) {
+ offset = ALIGN(offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_2D;
+ eg_surf_minify(surf, level+i, bpe, i, slice_pt, mtilew, mtileh, mtileb, offset);
+ if (level[i].mode == RADEON_SURF_MODE_1D) {
+ return eg_surface_init_1d(surf_man, surf, level, bpe, offset, i);
+ }
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int eg_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned mode)
+{
+ unsigned tileb;
+
+ /* check surface dimension */
+ if (surf->npix_x > 16384 || surf->npix_y > 16384 || surf->npix_z > 16384) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 15) {
+ return -EINVAL;
+ }
+
+ /* force 1D on kernels that can't do 2D */
+ if (!surf_man->hw_info.allow_2d && mode > RADEON_SURF_MODE_1D) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 2D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ /* check tile split */
+ if (mode == RADEON_SURF_MODE_2D) {
+ switch (surf->tile_split) {
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (surf->mtilea) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check aspect ratio */
+ if (surf_man->hw_info.num_banks < surf->mtilea) {
+ return -EINVAL;
+ }
+ /* check bank width */
+ switch (surf->bankw) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check bank height */
+ switch (surf->bankh) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ tileb = MIN2(surf->tile_split, 64 * surf->bpe * surf->nsamples);
+ if ((tileb * surf->bankh * surf->bankw) < surf_man->hw_info.group_bytes) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int eg_surface_init_1d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned zs_flags = RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER;
+ int r, is_depth_stencil = (surf->flags & zs_flags) == zs_flags;
+ /* Old libdrm headers didn't have stencil_level in them. This prevents crashes. */
+ struct radeon_surface_level tmp[RADEON_SURF_MAX_LEVEL];
+ struct radeon_surface_level *stencil_level =
+ (surf->flags & RADEON_SURF_HAS_SBUFFER_MIPTREE) ? surf->stencil_level : tmp;
+
+ r = eg_surface_init_1d(surf_man, surf, surf->level, surf->bpe, 0, 0);
+ if (r)
+ return r;
+
+ if (is_depth_stencil) {
+ r = eg_surface_init_1d(surf_man, surf, stencil_level, 1,
+ surf->bo_size, 0);
+ surf->stencil_offset = stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int eg_surface_init_2d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned zs_flags = RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER;
+ int r, is_depth_stencil = (surf->flags & zs_flags) == zs_flags;
+ /* Old libdrm headers didn't have stencil_level in them. This prevents crashes. */
+ struct radeon_surface_level tmp[RADEON_SURF_MAX_LEVEL];
+ struct radeon_surface_level *stencil_level =
+ (surf->flags & RADEON_SURF_HAS_SBUFFER_MIPTREE) ? surf->stencil_level : tmp;
+
+ r = eg_surface_init_2d(surf_man, surf, surf->level, surf->bpe,
+ surf->tile_split, 0, 0);
+ if (r)
+ return r;
+
+ if (is_depth_stencil) {
+ r = eg_surface_init_2d(surf_man, surf, stencil_level, 1,
+ surf->stencil_tile_split, surf->bo_size, 0);
+ surf->stencil_offset = stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int eg_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode;
+ int r;
+
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ /* zbuffers only support 1D or 2D tiled surfaces */
+ switch (mode) {
+ case RADEON_SURF_MODE_1D:
+ case RADEON_SURF_MODE_2D:
+ break;
+ default:
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ break;
+ }
+ }
+
+ r = eg_surface_sanity(surf_man, surf, mode);
+ if (r) {
+ return r;
+ }
+
+ surf->stencil_offset = 0;
+ surf->bo_alignment = 0;
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = r6_surface_init_linear_aligned(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = eg_surface_init_1d_miptrees(surf_man, surf);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = eg_surface_init_2d_miptrees(surf_man, surf);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+static unsigned log2_int(unsigned x)
+{
+ unsigned l;
+
+ if (x < 2) {
+ return 0;
+ }
+ for (l = 2; ; l++) {
+ if ((unsigned)(1 << l) > x) {
+ return l - 1;
+ }
+ }
+ return 0;
+}
+
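+/* Illustrative note: log2_int returns floor(log2(x)), e.g. 1 -> 0,
+ * 2 -> 1, 7 -> 2, 8 -> 3.
+ */
+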
+/* compute best tile_split, bankw, bankh, mtilea
+ * depending on surface
+ */
+static int eg_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tileb, h_over_w;
+ int r;
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ /* set some default values to avoid the sanity check choking on them */
+ surf->tile_split = 1024;
+ surf->bankw = 1;
+ surf->bankh = 1;
+ surf->mtilea = surf_man->hw_info.num_banks;
+ tileb = MIN2(surf->tile_split, 64 * surf->bpe * surf->nsamples);
+ for (; surf->bankh <= 8; surf->bankh *= 2) {
+ if ((tileb * surf->bankh * surf->bankw) >= surf_man->hw_info.group_bytes) {
+ break;
+ }
+ }
+ if (surf->mtilea > 8) {
+ surf->mtilea = 8;
+ }
+
+ r = eg_surface_sanity(surf_man, surf, mode);
+ if (r) {
+ return r;
+ }
+
+ if (mode != RADEON_SURF_MODE_2D) {
+ /* nothing to do for a non-2D tiled surface */
+ return 0;
+ }
+
+ /* Tweak TILE_SPLIT for performance here. */
+ if (surf->nsamples > 1) {
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ switch (surf->nsamples) {
+ case 2:
+ surf->tile_split = 128;
+ break;
+ case 4:
+ surf->tile_split = 128;
+ break;
+ case 8:
+ surf->tile_split = 256;
+ break;
+ case 16: /* cayman only */
+ surf->tile_split = 512;
+ break;
+ default:
+ fprintf(stderr, "radeon: Wrong number of samples %i (%i)\n",
+ surf->nsamples, __LINE__);
+ return -EINVAL;
+ }
+ surf->stencil_tile_split = 64;
+ } else {
+ /* tile split must be >= 256 for colorbuffer surfaces; since
+ * SAMPLE_SPLIT = tile_split / (bpe * 64), the optimal value is 2
+ */
+ surf->tile_split = MAX2(2 * surf->bpe * 64, 256);
+ if (surf->tile_split > 4096)
+ surf->tile_split = 4096;
+ }
+ } else {
+ /* set tile split to row size */
+ surf->tile_split = surf_man->hw_info.row_size;
+ surf->stencil_tile_split = surf_man->hw_info.row_size / 2;
+ }
+
+ /* bankw or bankh greater than 1 increases the alignment requirement;
+ * it is not clear whether using smaller bankw & bankh to stick with 2D
+ * tiling on small surfaces beats falling back to 1D tiling.
+ * Use the recommended value based on tile size for now.
+ *
+ * fmask buffers have different optimal values; figure them out once we
+ * use them.
+ */
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ /* assume 1 byte for stencil; we optimize for stencil since stencil
+ * and depth share surface values
+ */
+ tileb = MIN2(surf->tile_split, 64 * surf->nsamples);
+ } else {
+ tileb = MIN2(surf->tile_split, 64 * surf->bpe * surf->nsamples);
+ }
+
+ /* use bankw of 1 to minimize width alignment; it might be interesting
+ * to increase it for large surfaces
+ */
+ surf->bankw = 1;
+ switch (tileb) {
+ case 64:
+ surf->bankh = 4;
+ break;
+ case 128:
+ case 256:
+ surf->bankh = 2;
+ break;
+ default:
+ surf->bankh = 1;
+ break;
+ }
+ /* double check the constraint */
+ for (; surf->bankh <= 8; surf->bankh *= 2) {
+ if ((tileb * surf->bankh * surf->bankw) >= surf_man->hw_info.group_bytes) {
+ break;
+ }
+ }
+
+ h_over_w = (((surf->bankh * surf_man->hw_info.num_banks) << 16) /
+ (surf->bankw * surf_man->hw_info.num_pipes)) >> 16;
+ surf->mtilea = 1 << (log2_int(h_over_w) >> 1);
+
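+ /* Illustrative sketch (hypothetical values): bankh = 2, num_banks = 8,
+ * bankw = 1, num_pipes = 4 gives:
+ *
+ *   h_over_w = (((2 * 8) << 16) / (1 * 4)) >> 16 = 4;
+ *   mtilea   = 1 << (log2_int(4) >> 1) = 2;
+ */
+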
+ return 0;
+}
+
+
+/* ===========================================================================
+ * Southern Islands family
+ */
+#define SI__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
+#define SI__PIPE_CONFIG__ADDR_SURF_P2 0
+#define SI__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
+#define SI__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
+#define SI__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
+#define SI__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
+#define SI__GB_TILE_MODE__TILE_SPLIT(x) (((x) >> 11) & 0x7)
+#define SI__TILE_SPLIT__64B 0
+#define SI__TILE_SPLIT__128B 1
+#define SI__TILE_SPLIT__256B 2
+#define SI__TILE_SPLIT__512B 3
+#define SI__TILE_SPLIT__1024B 4
+#define SI__TILE_SPLIT__2048B 5
+#define SI__TILE_SPLIT__4096B 6
+#define SI__GB_TILE_MODE__BANK_WIDTH(x) (((x) >> 14) & 0x3)
+#define SI__BANK_WIDTH__1 0
+#define SI__BANK_WIDTH__2 1
+#define SI__BANK_WIDTH__4 2
+#define SI__BANK_WIDTH__8 3
+#define SI__GB_TILE_MODE__BANK_HEIGHT(x) (((x) >> 16) & 0x3)
+#define SI__BANK_HEIGHT__1 0
+#define SI__BANK_HEIGHT__2 1
+#define SI__BANK_HEIGHT__4 2
+#define SI__BANK_HEIGHT__8 3
+#define SI__GB_TILE_MODE__MACRO_TILE_ASPECT(x) (((x) >> 18) & 0x3)
+#define SI__MACRO_TILE_ASPECT__1 0
+#define SI__MACRO_TILE_ASPECT__2 1
+#define SI__MACRO_TILE_ASPECT__4 2
+#define SI__MACRO_TILE_ASPECT__8 3
+#define SI__GB_TILE_MODE__NUM_BANKS(x) (((x) >> 20) & 0x3)
+#define SI__NUM_BANKS__2_BANK 0
+#define SI__NUM_BANKS__4_BANK 1
+#define SI__NUM_BANKS__8_BANK 2
+#define SI__NUM_BANKS__16_BANK 3
+
+
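+/* Illustrative decode (hypothetical GB_TILE_MODE word 0x252100):
+ *
+ *   SI__GB_TILE_MODE__PIPE_CONFIG(0x252100)       == 4  ->  4 pipes
+ *   SI__GB_TILE_MODE__TILE_SPLIT(0x252100)        == 4  ->  1024B split
+ *   SI__GB_TILE_MODE__BANK_WIDTH(0x252100)        == 0  ->  bankw 1
+ *   SI__GB_TILE_MODE__BANK_HEIGHT(0x252100)       == 1  ->  bankh 2
+ *   SI__GB_TILE_MODE__MACRO_TILE_ASPECT(0x252100) == 1  ->  aspect 2
+ *   SI__GB_TILE_MODE__NUM_BANKS(0x252100)         == 2  ->  8 banks
+ */
+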
+static void si_gb_tile_mode(uint32_t gb_tile_mode,
+ unsigned *num_pipes,
+ unsigned *num_banks,
+ uint32_t *macro_tile_aspect,
+ uint32_t *bank_w,
+ uint32_t *bank_h,
+ uint32_t *tile_split)
+{
+ if (num_pipes) {
+ switch (SI__GB_TILE_MODE__PIPE_CONFIG(gb_tile_mode)) {
+ case SI__PIPE_CONFIG__ADDR_SURF_P2:
+ default:
+ *num_pipes = 2;
+ break;
+ case SI__PIPE_CONFIG__ADDR_SURF_P4_8x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P4_16x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P4_16x32:
+ case SI__PIPE_CONFIG__ADDR_SURF_P4_32x32:
+ *num_pipes = 4;
+ break;
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
+ *num_pipes = 8;
+ break;
+ }
+ }
+ if (num_banks) {
+ switch (SI__GB_TILE_MODE__NUM_BANKS(gb_tile_mode)) {
+ default:
+ case SI__NUM_BANKS__2_BANK:
+ *num_banks = 2;
+ break;
+ case SI__NUM_BANKS__4_BANK:
+ *num_banks = 4;
+ break;
+ case SI__NUM_BANKS__8_BANK:
+ *num_banks = 8;
+ break;
+ case SI__NUM_BANKS__16_BANK:
+ *num_banks = 16;
+ break;
+ }
+ }
+ if (macro_tile_aspect) {
+ switch (SI__GB_TILE_MODE__MACRO_TILE_ASPECT(gb_tile_mode)) {
+ default:
+ case SI__MACRO_TILE_ASPECT__1:
+ *macro_tile_aspect = 1;
+ break;
+ case SI__MACRO_TILE_ASPECT__2:
+ *macro_tile_aspect = 2;
+ break;
+ case SI__MACRO_TILE_ASPECT__4:
+ *macro_tile_aspect = 4;
+ break;
+ case SI__MACRO_TILE_ASPECT__8:
+ *macro_tile_aspect = 8;
+ break;
+ }
+ }
+ if (bank_w) {
+ switch (SI__GB_TILE_MODE__BANK_WIDTH(gb_tile_mode)) {
+ default:
+ case SI__BANK_WIDTH__1:
+ *bank_w = 1;
+ break;
+ case SI__BANK_WIDTH__2:
+ *bank_w = 2;
+ break;
+ case SI__BANK_WIDTH__4:
+ *bank_w = 4;
+ break;
+ case SI__BANK_WIDTH__8:
+ *bank_w = 8;
+ break;
+ }
+ }
+ if (bank_h) {
+ switch (SI__GB_TILE_MODE__BANK_HEIGHT(gb_tile_mode)) {
+ default:
+ case SI__BANK_HEIGHT__1:
+ *bank_h = 1;
+ break;
+ case SI__BANK_HEIGHT__2:
+ *bank_h = 2;
+ break;
+ case SI__BANK_HEIGHT__4:
+ *bank_h = 4;
+ break;
+ case SI__BANK_HEIGHT__8:
+ *bank_h = 8;
+ break;
+ }
+ }
+ if (tile_split) {
+ switch (SI__GB_TILE_MODE__TILE_SPLIT(gb_tile_mode)) {
+ default:
+ case SI__TILE_SPLIT__64B:
+ *tile_split = 64;
+ break;
+ case SI__TILE_SPLIT__128B:
+ *tile_split = 128;
+ break;
+ case SI__TILE_SPLIT__256B:
+ *tile_split = 256;
+ break;
+ case SI__TILE_SPLIT__512B:
+ *tile_split = 512;
+ break;
+ case SI__TILE_SPLIT__1024B:
+ *tile_split = 1024;
+ break;
+ case SI__TILE_SPLIT__2048B:
+ *tile_split = 2048;
+ break;
+ case SI__TILE_SPLIT__4096B:
+ *tile_split = 4096;
+ break;
+ }
+ }
+}
+
+static int si_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 33) {
+ if (!radeon_get_value(surf_man->fd, RADEON_INFO_SI_TILE_MODE_ARRAY, surf_man->hw_info.tile_mode_array)) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+ }
+ drmFreeVersion(version);
+
+ switch (tiling_config & 0xf) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ surf_man->hw_info.num_pipes = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf0) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ case 2:
+ surf_man->hw_info.num_banks = 16;
+ break;
+ default:
+ surf_man->hw_info.num_banks = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf00) >> 8) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ surf_man->hw_info.group_bytes = 256;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf000) >> 12) {
+ case 0:
+ surf_man->hw_info.row_size = 1024;
+ break;
+ case 1:
+ surf_man->hw_info.row_size = 2048;
+ break;
+ case 2:
+ surf_man->hw_info.row_size = 4096;
+ break;
+ default:
+ surf_man->hw_info.row_size = 4096;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+ return 0;
+}
+
+static int si_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned mode, unsigned *tile_mode, unsigned *stencil_tile_mode)
+{
+ uint32_t gb_tile_mode;
+
+ /* check surface dimension */
+ if (surf->npix_x > 16384 || surf->npix_y > 16384 || surf->npix_z > 16384) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 15) {
+ return -EINVAL;
+ }
+
+ /* force 1D on kernels that can't do 2D */
+ if (mode > RADEON_SURF_MODE_1D &&
+ (!surf_man->hw_info.allow_2d || !(surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX))) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 1D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ if (surf->nsamples > 1 && mode != RADEON_SURF_MODE_2D) {
+ return -EINVAL;
+ }
+
+ if (!surf->tile_split) {
+ /* default value */
+ surf->mtilea = 1;
+ surf->bankw = 1;
+ surf->bankh = 1;
+ surf->tile_split = 64;
+ surf->stencil_tile_split = 64;
+ }
+
+ switch (mode) {
+ case RADEON_SURF_MODE_2D:
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ switch (surf->nsamples) {
+ case 1:
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D;
+ break;
+ case 2:
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_2AA;
+ break;
+ case 4:
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_4AA;
+ break;
+ case 8:
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_8AA;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* retrieve tiling mode value */
+ gb_tile_mode = surf_man->hw_info.tile_mode_array[*stencil_tile_mode];
+ si_gb_tile_mode(gb_tile_mode, NULL, NULL, NULL, NULL, NULL, &surf->stencil_tile_split);
+ }
+ if (surf->flags & RADEON_SURF_ZBUFFER) {
+ switch (surf->nsamples) {
+ case 1:
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D;
+ break;
+ case 2:
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_2AA;
+ break;
+ case 4:
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_4AA;
+ break;
+ case 8:
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_8AA;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (surf->flags & RADEON_SURF_SCANOUT) {
+ switch (surf->bpe) {
+ case 2:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
+ break;
+ case 4:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (surf->bpe) {
+ case 1:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_8BPP;
+ break;
+ case 2:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_16BPP;
+ break;
+ case 4:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_32BPP;
+ break;
+ case 8:
+ case 16:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_64BPP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ /* retrieve tiling mode value */
+ gb_tile_mode = surf_man->hw_info.tile_mode_array[*tile_mode];
+ si_gb_tile_mode(gb_tile_mode, NULL, NULL, &surf->mtilea, &surf->bankw, &surf->bankh, &surf->tile_split);
+ break;
+ case RADEON_SURF_MODE_1D:
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_1D;
+ }
+ if (surf->flags & RADEON_SURF_ZBUFFER) {
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_1D;
+ } else if (surf->flags & RADEON_SURF_SCANOUT) {
+ *tile_mode = SI_TILE_MODE_COLOR_1D_SCANOUT;
+ } else {
+ *tile_mode = SI_TILE_MODE_COLOR_1D;
+ }
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ default:
+ *tile_mode = SI_TILE_MODE_COLOR_LINEAR_ALIGNED;
+ }
+
+ return 0;
+}
+
+static void si_surf_minify(struct radeon_surface *surf,
+ struct radeon_surface_level *surflevel,
+ unsigned bpe, unsigned level,
+ uint32_t xalign, uint32_t yalign, uint32_t zalign,
+ uint32_t slice_align, uint64_t offset)
+{
+ if (level == 0) {
+ surflevel->npix_x = surf->npix_x;
+ } else {
+ surflevel->npix_x = mip_minify(next_power_of_two(surf->npix_x), level);
+ }
+ surflevel->npix_y = mip_minify(surf->npix_y, level);
+ surflevel->npix_z = mip_minify(surf->npix_z, level);
+
+ if (level == 0 && surf->last_level > 0) {
+ surflevel->nblk_x = (next_power_of_two(surflevel->npix_x) + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (next_power_of_two(surflevel->npix_y) + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (next_power_of_two(surflevel->npix_z) + surf->blk_d - 1) / surf->blk_d;
+ } else {
+ surflevel->nblk_x = (surflevel->npix_x + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (surflevel->npix_y + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (surflevel->npix_z + surf->blk_d - 1) / surf->blk_d;
+ }
+
+ surflevel->nblk_y = ALIGN(surflevel->nblk_y, yalign);
+
+ /* XXX: Texture sampling uses unexpectedly large pitches in some cases;
+ * these are just guesses at the rules behind them
+ */
+ if (level == 0 && surf->last_level == 0)
+ /* Non-mipmap pitch padded to slice alignment */
+ /* Using just bpe here breaks stencil blitting; surf->bpe works. */
+ xalign = MAX2(xalign, slice_align / surf->bpe);
+ else if (surflevel->mode == RADEON_SURF_MODE_LINEAR_ALIGNED)
+ /* Small rows evenly distributed across slice */
+ xalign = MAX2(xalign, slice_align / bpe / surflevel->nblk_y);
+
+ surflevel->nblk_x = ALIGN(surflevel->nblk_x, xalign);
+ surflevel->nblk_z = ALIGN(surflevel->nblk_z, zalign);
+
+ surflevel->offset = offset;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
+ surflevel->slice_size = ALIGN((uint64_t)surflevel->pitch_bytes * surflevel->nblk_y,
+ (uint64_t)slice_align);
+
+ surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
+}
+
+static void si_surf_minify_2d(struct radeon_surface *surf,
+ struct radeon_surface_level *surflevel,
+ unsigned bpe, unsigned level, unsigned slice_pt,
+ uint32_t xalign, uint32_t yalign, uint32_t zalign,
+ unsigned mtileb, uint64_t offset)
+{
+ unsigned mtile_pr, mtile_ps;
+
+ if (level == 0) {
+ surflevel->npix_x = surf->npix_x;
+ } else {
+ surflevel->npix_x = mip_minify(next_power_of_two(surf->npix_x), level);
+ }
+ surflevel->npix_y = mip_minify(surf->npix_y, level);
+ surflevel->npix_z = mip_minify(surf->npix_z, level);
+
+ if (level == 0 && surf->last_level > 0) {
+ surflevel->nblk_x = (next_power_of_two(surflevel->npix_x) + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (next_power_of_two(surflevel->npix_y) + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (next_power_of_two(surflevel->npix_z) + surf->blk_d - 1) / surf->blk_d;
+ } else {
+ surflevel->nblk_x = (surflevel->npix_x + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (surflevel->npix_y + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (surflevel->npix_z + surf->blk_d - 1) / surf->blk_d;
+ }
+
+ if (surf->nsamples == 1 && surflevel->mode == RADEON_SURF_MODE_2D &&
+ !(surf->flags & RADEON_SURF_FMASK)) {
+ if (surflevel->nblk_x < xalign || surflevel->nblk_y < yalign) {
+ surflevel->mode = RADEON_SURF_MODE_1D;
+ return;
+ }
+ }
+ surflevel->nblk_x = ALIGN(surflevel->nblk_x, xalign);
+ surflevel->nblk_y = ALIGN(surflevel->nblk_y, yalign);
+ surflevel->nblk_z = ALIGN(surflevel->nblk_z, zalign);
+
+ /* macro tile per row */
+ mtile_pr = surflevel->nblk_x / xalign;
+ /* macro tile per slice */
+ mtile_ps = (mtile_pr * surflevel->nblk_y) / yalign;
+ surflevel->offset = offset;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
+ surflevel->slice_size = (uint64_t)mtile_ps * mtileb * slice_pt;
+
+ surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
+}
+
+static int si_surface_init_linear_aligned(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned tile_mode,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, slice_align;
+ unsigned i;
+
+ /* compute alignment */
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+ xalign = MAX2(8, 64 / surf->bpe);
+ yalign = 1;
+ zalign = 1;
+ slice_align = MAX2(64 * surf->bpe, surf_man->hw_info.group_bytes);
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
+ si_surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, slice_align, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
+ surf->tiling_index[i] = tile_mode;
+ }
+ }
+ return 0;
+}
+
+static int si_surface_init_1d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe, unsigned tile_mode,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, slice_align;
+ unsigned alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ unsigned i;
+
+ /* compute alignment */
+ xalign = 8;
+ yalign = 8;
+ zalign = 1;
+ slice_align = surf_man->hw_info.group_bytes;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((bpe == 1) ? 64 : 32, xalign);
+ }
+
+ if (start_level <= 1) {
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (offset) {
+ offset = ALIGN(offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_1D;
+ si_surf_minify(surf, level+i, bpe, i, xalign, yalign, zalign, slice_align, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, alignment);
+ }
+ if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
+ if (surf->level == level) {
+ surf->tiling_index[i] = tile_mode;
+ /* ok to set the stencil index too: the stencil pass runs later and overwrites it */
+ surf->stencil_tiling_index[i] = tile_mode;
+ } else {
+ surf->stencil_tiling_index[i] = tile_mode;
+ }
+ }
+ }
+ return 0;
+}
+
+static int si_surface_init_1d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned tile_mode, unsigned stencil_tile_mode)
+{
+ int r;
+
+ r = si_surface_init_1d(surf_man, surf, surf->level, surf->bpe, tile_mode, 0, 0);
+ if (r) {
+ return r;
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ r = si_surface_init_1d(surf_man, surf, surf->stencil_level, 1, stencil_tile_mode, surf->bo_size, 0);
+ surf->stencil_offset = surf->stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int si_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe, unsigned tile_mode,
+ unsigned num_pipes, unsigned num_banks,
+ unsigned tile_split,
+ uint64_t offset,
+ unsigned start_level)
+{
+ uint64_t aligned_offset = offset;
+ unsigned tilew, tileh, tileb;
+ unsigned mtilew, mtileh, mtileb;
+ unsigned slice_pt;
+ unsigned i;
+
+ /* compute tile values */
+ tilew = 8;
+ tileh = 8;
+ tileb = tilew * tileh * bpe * surf->nsamples;
+ /* slices per tile */
+ slice_pt = 1;
+ if (tileb > tile_split && tile_split) {
+ slice_pt = tileb / tile_split;
+ }
+ tileb = tileb / slice_pt;
+
+ /* macro tile width & height */
+ mtilew = (tilew * surf->bankw * num_pipes) * surf->mtilea;
+ mtileh = (tileh * surf->bankh * num_banks) / surf->mtilea;
+
+ /* macro tile bytes */
+ mtileb = (mtilew / tilew) * (mtileh / tileh) * tileb;
+
+ if (start_level <= 1) {
+ unsigned alignment = MAX2(256, mtileb);
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (aligned_offset) {
+ aligned_offset = ALIGN(aligned_offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_2D;
+ si_surf_minify_2d(surf, level+i, bpe, i, slice_pt, mtilew, mtileh, 1, mtileb, aligned_offset);
+ if (level[i].mode == RADEON_SURF_MODE_1D) {
+ switch (tile_mode) {
+ case SI_TILE_MODE_COLOR_2D_8BPP:
+ case SI_TILE_MODE_COLOR_2D_16BPP:
+ case SI_TILE_MODE_COLOR_2D_32BPP:
+ case SI_TILE_MODE_COLOR_2D_64BPP:
+ tile_mode = SI_TILE_MODE_COLOR_1D;
+ break;
+ case SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP:
+ case SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP:
+ tile_mode = SI_TILE_MODE_COLOR_1D_SCANOUT;
+ break;
+ case SI_TILE_MODE_DEPTH_STENCIL_2D:
+ tile_mode = SI_TILE_MODE_DEPTH_STENCIL_1D;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return si_surface_init_1d(surf_man, surf, level, bpe, tile_mode, offset, i);
+ }
+ /* level0 and first mipmap need to have alignment */
+ aligned_offset = offset = surf->bo_size;
+ if (i == 0) {
+ aligned_offset = ALIGN(aligned_offset, surf->bo_alignment);
+ }
+ if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
+ if (surf->level == level) {
+ surf->tiling_index[i] = tile_mode;
+ /* ok to set the stencil index too: the stencil pass runs later and overwrites it */
+ surf->stencil_tiling_index[i] = tile_mode;
+ } else {
+ surf->stencil_tiling_index[i] = tile_mode;
+ }
+ }
+ }
+ return 0;
+}
+
+static int si_surface_init_2d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned tile_mode, unsigned stencil_tile_mode)
+{
+ unsigned num_pipes, num_banks;
+ uint32_t gb_tile_mode;
+ int r;
+
+ /* retrieve tiling mode value */
+ gb_tile_mode = surf_man->hw_info.tile_mode_array[tile_mode];
+ si_gb_tile_mode(gb_tile_mode, &num_pipes, &num_banks, NULL, NULL, NULL, NULL);
+
+ r = si_surface_init_2d(surf_man, surf, surf->level, surf->bpe, tile_mode, num_pipes, num_banks, surf->tile_split, 0, 0);
+ if (r) {
+ return r;
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ r = si_surface_init_2d(surf_man, surf, surf->stencil_level, 1, stencil_tile_mode, num_pipes, num_banks, surf->stencil_tile_split, surf->bo_size, 0);
+ surf->stencil_offset = surf->stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int si_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tile_mode, stencil_tile_mode;
+ int r;
+
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ /* zbuffers only support 1D or 2D tiled surfaces */
+ switch (mode) {
+ case RADEON_SURF_MODE_1D:
+ case RADEON_SURF_MODE_2D:
+ break;
+ default:
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ break;
+ }
+ }
+
+ r = si_surface_sanity(surf_man, surf, mode, &tile_mode, &stencil_tile_mode);
+ if (r) {
+ return r;
+ }
+
+ surf->stencil_offset = 0;
+ surf->bo_alignment = 0;
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = si_surface_init_linear_aligned(surf_man, surf, tile_mode, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = si_surface_init_1d_miptrees(surf_man, surf, tile_mode, stencil_tile_mode);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = si_surface_init_2d_miptrees(surf_man, surf, tile_mode, stencil_tile_mode);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+/* compute best tiling parameters (tile split, bank width/height, mtilea)
+ * depending on surface
+ */
+static int si_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tile_mode, stencil_tile_mode;
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER) &&
+ !(surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX)) {
+ /* depth/stencil force 1d tiling for old mesa */
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ }
+
+ return si_surface_sanity(surf_man, surf, mode, &tile_mode, &stencil_tile_mode);
+}
+
+
+/* ===========================================================================
+ * Sea Islands family
+ */
+#define CIK__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
+#define CIK__PIPE_CONFIG__ADDR_SURF_P2 0
+#define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
+#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
+#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
+#define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
+#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16 16
+#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17
+#define CIK__GB_TILE_MODE__TILE_SPLIT(x) (((x) >> 11) & 0x7)
+#define CIK__TILE_SPLIT__64B 0
+#define CIK__TILE_SPLIT__128B 1
+#define CIK__TILE_SPLIT__256B 2
+#define CIK__TILE_SPLIT__512B 3
+#define CIK__TILE_SPLIT__1024B 4
+#define CIK__TILE_SPLIT__2048B 5
+#define CIK__TILE_SPLIT__4096B 6
+#define CIK__GB_TILE_MODE__SAMPLE_SPLIT(x) (((x) >> 25) & 0x3)
+#define CIK__SAMPLE_SPLIT__1 0
+#define CIK__SAMPLE_SPLIT__2 1
+#define CIK__SAMPLE_SPLIT__4 2
+#define CIK__SAMPLE_SPLIT__8 3
+#define CIK__GB_MACROTILE_MODE__BANK_WIDTH(x) ((x) & 0x3)
+#define CIK__BANK_WIDTH__1 0
+#define CIK__BANK_WIDTH__2 1
+#define CIK__BANK_WIDTH__4 2
+#define CIK__BANK_WIDTH__8 3
+#define CIK__GB_MACROTILE_MODE__BANK_HEIGHT(x) (((x) >> 2) & 0x3)
+#define CIK__BANK_HEIGHT__1 0
+#define CIK__BANK_HEIGHT__2 1
+#define CIK__BANK_HEIGHT__4 2
+#define CIK__BANK_HEIGHT__8 3
+#define CIK__GB_MACROTILE_MODE__MACRO_TILE_ASPECT(x) (((x) >> 4) & 0x3)
+#define CIK__MACRO_TILE_ASPECT__1 0
+#define CIK__MACRO_TILE_ASPECT__2 1
+#define CIK__MACRO_TILE_ASPECT__4 2
+#define CIK__MACRO_TILE_ASPECT__8 3
+#define CIK__GB_MACROTILE_MODE__NUM_BANKS(x) (((x) >> 6) & 0x3)
+#define CIK__NUM_BANKS__2_BANK 0
+#define CIK__NUM_BANKS__4_BANK 1
+#define CIK__NUM_BANKS__8_BANK 2
+#define CIK__NUM_BANKS__16_BANK 3
+
+
+static void cik_get_2d_params(struct radeon_surface_manager *surf_man,
+ unsigned bpe, unsigned nsamples, bool is_color,
+ unsigned tile_mode,
+ uint32_t *num_pipes,
+ uint32_t *tile_split_ptr,
+ uint32_t *num_banks,
+ uint32_t *macro_tile_aspect,
+ uint32_t *bank_w,
+ uint32_t *bank_h)
+{
+ uint32_t gb_tile_mode = surf_man->hw_info.tile_mode_array[tile_mode];
+ unsigned tileb_1x, tileb;
+ unsigned gb_macrotile_mode;
+ unsigned macrotile_index;
+ unsigned tile_split, sample_split;
+
+ if (num_pipes) {
+ switch (CIK__GB_TILE_MODE__PIPE_CONFIG(gb_tile_mode)) {
+ case CIK__PIPE_CONFIG__ADDR_SURF_P2:
+ default:
+ *num_pipes = 2;
+ break;
+ case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
+ *num_pipes = 4;
+ break;
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
+ *num_pipes = 8;
+ break;
+ case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
+ *num_pipes = 16;
+ break;
+ }
+ }
+ switch (CIK__GB_TILE_MODE__TILE_SPLIT(gb_tile_mode)) {
+ default:
+ case CIK__TILE_SPLIT__64B:
+ tile_split = 64;
+ break;
+ case CIK__TILE_SPLIT__128B:
+ tile_split = 128;
+ break;
+ case CIK__TILE_SPLIT__256B:
+ tile_split = 256;
+ break;
+ case CIK__TILE_SPLIT__512B:
+ tile_split = 512;
+ break;
+ case CIK__TILE_SPLIT__1024B:
+ tile_split = 1024;
+ break;
+ case CIK__TILE_SPLIT__2048B:
+ tile_split = 2048;
+ break;
+ case CIK__TILE_SPLIT__4096B:
+ tile_split = 4096;
+ break;
+ }
+ switch (CIK__GB_TILE_MODE__SAMPLE_SPLIT(gb_tile_mode)) {
+ default:
+ case CIK__SAMPLE_SPLIT__1:
+ sample_split = 1;
+ break;
+ case CIK__SAMPLE_SPLIT__2:
+ sample_split = 2;
+ break;
+ case CIK__SAMPLE_SPLIT__4:
+ sample_split = 4;
+ break;
+ case CIK__SAMPLE_SPLIT__8:
+ sample_split = 8;
+ break;
+ }
+
+ /* Adjust the tile split. */
+ tileb_1x = 8 * 8 * bpe;
+ if (is_color) {
+ tile_split = MAX2(256, sample_split * tileb_1x);
+ }
+ tile_split = MIN2(surf_man->hw_info.row_size, tile_split);
+
+ /* Determine the macrotile index. */
+ tileb = MIN2(tile_split, nsamples * tileb_1x);
+
+ for (macrotile_index = 0; tileb > 64; macrotile_index++) {
+ tileb >>= 1;
+ }
+ gb_macrotile_mode = surf_man->hw_info.macrotile_mode_array[macrotile_index];
+
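+ /* Illustrative sketch (hypothetical values): bpe = 4, nsamples = 1,
+ * is_color, sample_split = 1 and row_size >= 256 give tile_split = 256:
+ *
+ *   tileb = MIN2(256, 1 * 8 * 8 * 4) = 256;
+ *   // 256 -> 128 -> 64: two halvings, so macrotile_index == 2
+ */
+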
+ if (tile_split_ptr) {
+ *tile_split_ptr = tile_split;
+ }
+ if (num_banks) {
+ switch (CIK__GB_MACROTILE_MODE__NUM_BANKS(gb_macrotile_mode)) {
+ default:
+ case CIK__NUM_BANKS__2_BANK:
+ *num_banks = 2;
+ break;
+ case CIK__NUM_BANKS__4_BANK:
+ *num_banks = 4;
+ break;
+ case CIK__NUM_BANKS__8_BANK:
+ *num_banks = 8;
+ break;
+ case CIK__NUM_BANKS__16_BANK:
+ *num_banks = 16;
+ break;
+ }
+ }
+ if (macro_tile_aspect) {
+ switch (CIK__GB_MACROTILE_MODE__MACRO_TILE_ASPECT(gb_macrotile_mode)) {
+ default:
+ case CIK__MACRO_TILE_ASPECT__1:
+ *macro_tile_aspect = 1;
+ break;
+ case CIK__MACRO_TILE_ASPECT__2:
+ *macro_tile_aspect = 2;
+ break;
+ case CIK__MACRO_TILE_ASPECT__4:
+ *macro_tile_aspect = 4;
+ break;
+ case CIK__MACRO_TILE_ASPECT__8:
+ *macro_tile_aspect = 8;
+ break;
+ }
+ }
+ if (bank_w) {
+ switch (CIK__GB_MACROTILE_MODE__BANK_WIDTH(gb_macrotile_mode)) {
+ default:
+ case CIK__BANK_WIDTH__1:
+ *bank_w = 1;
+ break;
+ case CIK__BANK_WIDTH__2:
+ *bank_w = 2;
+ break;
+ case CIK__BANK_WIDTH__4:
+ *bank_w = 4;
+ break;
+ case CIK__BANK_WIDTH__8:
+ *bank_w = 8;
+ break;
+ }
+ }
+ if (bank_h) {
+ switch (CIK__GB_MACROTILE_MODE__BANK_HEIGHT(gb_macrotile_mode)) {
+ default:
+ case CIK__BANK_HEIGHT__1:
+ *bank_h = 1;
+ break;
+ case CIK__BANK_HEIGHT__2:
+ *bank_h = 2;
+ break;
+ case CIK__BANK_HEIGHT__4:
+ *bank_h = 4;
+ break;
+ case CIK__BANK_HEIGHT__8:
+ *bank_h = 8;
+ break;
+ }
+ }
+}
+
+static int cik_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 35) {
+ if (!radeon_get_value(surf_man->fd, RADEON_INFO_SI_TILE_MODE_ARRAY, surf_man->hw_info.tile_mode_array) &&
+ !radeon_get_value(surf_man->fd, RADEON_INFO_CIK_MACROTILE_MODE_ARRAY, surf_man->hw_info.macrotile_mode_array)) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+ }
+ drmFreeVersion(version);
+
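+ /* tiling_config packs one field per nibble: bits 3:0 select the number
+ * of pipes, bits 7:4 the number of banks, bits 11:8 the group size and
+ * bits 15:12 the row size, as decoded by the switches below. */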
+ switch (tiling_config & 0xf) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ surf_man->hw_info.num_pipes = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf0) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ case 2:
+ surf_man->hw_info.num_banks = 16;
+ break;
+ default:
+ surf_man->hw_info.num_banks = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf00) >> 8) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ surf_man->hw_info.group_bytes = 256;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf000) >> 12) {
+ case 0:
+ surf_man->hw_info.row_size = 1024;
+ break;
+ case 1:
+ surf_man->hw_info.row_size = 2048;
+ break;
+ case 2:
+ surf_man->hw_info.row_size = 4096;
+ break;
+ default:
+ surf_man->hw_info.row_size = 4096;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+ return 0;
+}
+
+static int cik_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned mode, unsigned *tile_mode, unsigned *stencil_tile_mode)
+{
+ /* check surface dimensions */
+ if (surf->npix_x > 16384 || surf->npix_y > 16384 || surf->npix_z > 16384) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 15) {
+ return -EINVAL;
+ }
+
+ /* force 1D on kernels that can't do 2D */
+ if (mode > RADEON_SURF_MODE_1D &&
+ (!surf_man->hw_info.allow_2d || !(surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX))) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 1D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ if (surf->nsamples > 1 && mode != RADEON_SURF_MODE_2D) {
+ return -EINVAL;
+ }
+
+ if (!surf->tile_split) {
+ /* default values */
+ surf->mtilea = 1;
+ surf->bankw = 1;
+ surf->bankh = 1;
+ surf->tile_split = 64;
+ surf->stencil_tile_split = 64;
+ }
+
+ switch (mode) {
+ case RADEON_SURF_MODE_2D: {
+ if (surf->flags & RADEON_SURF_Z_OR_SBUFFER) {
+ switch (surf->nsamples) {
+ case 1:
+ *tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64;
+ break;
+ case 2:
+ case 4:
+ *tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128;
+ break;
+ case 8:
+ *tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ *stencil_tile_mode = *tile_mode;
+
+ cik_get_2d_params(surf_man, 1, surf->nsamples, false,
+ *stencil_tile_mode, NULL,
+ &surf->stencil_tile_split,
+ NULL, NULL, NULL, NULL);
+ }
+ } else if (surf->flags & RADEON_SURF_SCANOUT) {
+ *tile_mode = CIK_TILE_MODE_COLOR_2D_SCANOUT;
+ } else {
+ *tile_mode = CIK_TILE_MODE_COLOR_2D;
+ }
+
+ /* retrieve tiling mode values */
+ cik_get_2d_params(surf_man, surf->bpe, surf->nsamples,
+ !(surf->flags & RADEON_SURF_Z_OR_SBUFFER), *tile_mode,
+ NULL, &surf->tile_split, NULL, &surf->mtilea,
+ &surf->bankw, &surf->bankh);
+ break;
+ }
+ case RADEON_SURF_MODE_1D:
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ *stencil_tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_1D;
+ }
+ if (surf->flags & RADEON_SURF_ZBUFFER) {
+ *tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_1D;
+ } else if (surf->flags & RADEON_SURF_SCANOUT) {
+ *tile_mode = SI_TILE_MODE_COLOR_1D_SCANOUT;
+ } else {
+ *tile_mode = SI_TILE_MODE_COLOR_1D;
+ }
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ default:
+ *tile_mode = SI_TILE_MODE_COLOR_LINEAR_ALIGNED;
+ }
+
+ return 0;
+}
+
+static int cik_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe, unsigned tile_mode,
+ unsigned tile_split,
+ unsigned num_pipes, unsigned num_banks,
+ uint64_t offset,
+ unsigned start_level)
+{
+ uint64_t aligned_offset = offset;
+ unsigned tilew, tileh, tileb_1x, tileb;
+ unsigned mtilew, mtileh, mtileb;
+ unsigned slice_pt;
+ unsigned i;
+
+ /* compute tile values */
+ tilew = 8;
+ tileh = 8;
+ tileb_1x = tilew * tileh * bpe;
+
+ tile_split = MIN2(surf_man->hw_info.row_size, tile_split);
+
+ tileb = surf->nsamples * tileb_1x;
+
+ /* slices per tile */
+ slice_pt = 1;
+ if (tileb > tile_split && tile_split) {
+ slice_pt = tileb / tile_split;
+ tileb = tileb / slice_pt;
+ }
+
+ /* macro tile width & height */
+ mtilew = (tilew * surf->bankw * num_pipes) * surf->mtilea;
+ mtileh = (tileh * surf->bankh * num_banks) / surf->mtilea;
+
+ /* macro tile bytes */
+ mtileb = (mtilew / tilew) * (mtileh / tileh) * tileb;
+
+ if (start_level <= 1) {
+ unsigned alignment = MAX2(256, mtileb);
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (aligned_offset) {
+ aligned_offset = ALIGN(aligned_offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_2D;
+ si_surf_minify_2d(surf, level+i, bpe, i, slice_pt, mtilew, mtileh, 1, mtileb, aligned_offset);
+ if (level[i].mode == RADEON_SURF_MODE_1D) {
+ switch (tile_mode) {
+ case CIK_TILE_MODE_COLOR_2D:
+ tile_mode = SI_TILE_MODE_COLOR_1D;
+ break;
+ case CIK_TILE_MODE_COLOR_2D_SCANOUT:
+ tile_mode = SI_TILE_MODE_COLOR_1D_SCANOUT;
+ break;
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64:
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128:
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256:
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_512:
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_ROW_SIZE:
+ tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_1D;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return si_surface_init_1d(surf_man, surf, level, bpe, tile_mode, offset, i);
+ }
+ /* level 0 and the first mipmap level must honor the bo alignment */
+ aligned_offset = offset = surf->bo_size;
+ if (i == 0) {
+ aligned_offset = ALIGN(aligned_offset, surf->bo_alignment);
+ }
+ if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
+ if (surf->level == level) {
+ surf->tiling_index[i] = tile_mode;
+ /* setting both here is safe: the stencil miptree is computed afterwards */
+ surf->stencil_tiling_index[i] = tile_mode;
+ } else {
+ surf->stencil_tiling_index[i] = tile_mode;
+ }
+ }
+ }
+ return 0;
+}
+
+static int cik_surface_init_2d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned tile_mode, unsigned stencil_tile_mode)
+{
+ int r;
+ uint32_t num_pipes, num_banks;
+
+ cik_get_2d_params(surf_man, surf->bpe, surf->nsamples,
+ !(surf->flags & RADEON_SURF_Z_OR_SBUFFER), tile_mode,
+ &num_pipes, NULL, &num_banks, NULL, NULL, NULL);
+
+ r = cik_surface_init_2d(surf_man, surf, surf->level, surf->bpe, tile_mode,
+ surf->tile_split, num_pipes, num_banks, 0, 0);
+ if (r) {
+ return r;
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ r = cik_surface_init_2d(surf_man, surf, surf->stencil_level, 1, stencil_tile_mode,
+ surf->stencil_tile_split, num_pipes, num_banks,
+ surf->bo_size, 0);
+ surf->stencil_offset = surf->stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int cik_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tile_mode, stencil_tile_mode;
+ int r;
+
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ /* z-buffers only support 1D- or 2D-tiled surfaces */
+ switch (mode) {
+ case RADEON_SURF_MODE_1D:
+ case RADEON_SURF_MODE_2D:
+ break;
+ default:
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ break;
+ }
+ }
+
+ r = cik_surface_sanity(surf_man, surf, mode, &tile_mode, &stencil_tile_mode);
+ if (r) {
+ return r;
+ }
+
+ surf->stencil_offset = 0;
+ surf->bo_alignment = 0;
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = si_surface_init_linear_aligned(surf_man, surf, tile_mode, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = si_surface_init_1d_miptrees(surf_man, surf, tile_mode, stencil_tile_mode);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = cik_surface_init_2d_miptrees(surf_man, surf, tile_mode, stencil_tile_mode);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+/*
+ * Pick the preferred tiling parameters depending on the surface.
+ */
+static int cik_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tile_mode, stencil_tile_mode;
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER) &&
+ !(surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX)) {
+ /* force 1D tiling for depth/stencil surfaces on old Mesa versions */
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ }
+
+ return cik_surface_sanity(surf_man, surf, mode, &tile_mode, &stencil_tile_mode);
+}
+
+
+/* ===========================================================================
+ * public API
+ */
+struct radeon_surface_manager *
+radeon_surface_manager_new(int fd)
+{
+ struct radeon_surface_manager *surf_man;
+
+ surf_man = calloc(1, sizeof(struct radeon_surface_manager));
+ if (surf_man == NULL) {
+ return NULL;
+ }
+ surf_man->fd = fd;
+ if (radeon_get_value(fd, RADEON_INFO_DEVICE_ID, &surf_man->device_id)) {
+ goto out_err;
+ }
+ if (radeon_get_family(surf_man)) {
+ goto out_err;
+ }
+
+ if (surf_man->family <= CHIP_RV740) {
+ if (r6_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &r6_surface_init;
+ surf_man->surface_best = &r6_surface_best;
+ } else if (surf_man->family <= CHIP_ARUBA) {
+ if (eg_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &eg_surface_init;
+ surf_man->surface_best = &eg_surface_best;
+ } else if (surf_man->family < CHIP_BONAIRE) {
+ if (si_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &si_surface_init;
+ surf_man->surface_best = &si_surface_best;
+ } else {
+ if (cik_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &cik_surface_init;
+ surf_man->surface_best = &cik_surface_best;
+ }
+
+ return surf_man;
+out_err:
+ free(surf_man);
+ return NULL;
+}
+
+void
+radeon_surface_manager_free(struct radeon_surface_manager *surf_man)
+{
+ free(surf_man);
+}
+
+static int radeon_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned type,
+ unsigned mode)
+{
+ if (surf_man == NULL || surf_man->surface_init == NULL || surf == NULL) {
+ return -EINVAL;
+ }
+
+ /* all dimensions must be at least 1 */
+ if (!surf->npix_x || !surf->npix_y || !surf->npix_z) {
+ return -EINVAL;
+ }
+ if (!surf->blk_w || !surf->blk_h || !surf->blk_d) {
+ return -EINVAL;
+ }
+ if (!surf->array_size) {
+ return -EINVAL;
+ }
+ /* array size must be a power of 2 */
+ surf->array_size = next_power_of_two(surf->array_size);
+
+ switch (surf->nsamples) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check type */
+ switch (type) {
+ case RADEON_SURF_TYPE_1D:
+ if (surf->npix_y > 1) {
+ return -EINVAL;
+ }
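+ /* fall through */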
+ case RADEON_SURF_TYPE_2D:
+ if (surf->npix_z > 1) {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_SURF_TYPE_CUBEMAP:
+ if (surf->npix_z > 1) {
+ return -EINVAL;
+ }
+ /* treat cubemaps as if they were texture arrays */
+ if (surf_man->family >= CHIP_RV770) {
+ surf->array_size = 8;
+ } else {
+ surf->array_size = 6;
+ }
+ break;
+ case RADEON_SURF_TYPE_3D:
+ break;
+ case RADEON_SURF_TYPE_1D_ARRAY:
+ if (surf->npix_y > 1) {
+ return -EINVAL;
+ }
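+ /* fall through */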
+ case RADEON_SURF_TYPE_2D_ARRAY:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int
+radeon_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, type;
+ int r;
+
+ type = RADEON_SURF_GET(surf->flags, TYPE);
+ mode = RADEON_SURF_GET(surf->flags, MODE);
+
+ r = radeon_surface_sanity(surf_man, surf, type, mode);
+ if (r) {
+ return r;
+ }
+ return surf_man->surface_init(surf_man, surf);
+}
+
+int
+radeon_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, type;
+ int r;
+
+ type = RADEON_SURF_GET(surf->flags, TYPE);
+ mode = RADEON_SURF_GET(surf->flags, MODE);
+
+ r = radeon_surface_sanity(surf_man, surf, type, mode);
+ if (r) {
+ return r;
+ }
+ return surf_man->surface_best(surf_man, surf);
+}
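+
+/*
+ * Typical usage (a minimal sketch; the field values are illustrative and
+ * error handling is omitted):
+ *
+ *	struct radeon_surface_manager *surf_man = radeon_surface_manager_new(fd);
+ *	struct radeon_surface surf = {0};
+ *
+ *	surf.npix_x = 256; surf.npix_y = 256; surf.npix_z = 1;
+ *	surf.blk_w = 1; surf.blk_h = 1; surf.blk_d = 1;
+ *	surf.array_size = 1; surf.nsamples = 1; surf.bpe = 4;
+ *	surf.flags = RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE) |
+ *	             RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ *
+ *	radeon_surface_init(surf_man, &surf);
+ *	(surf.bo_size and surf.bo_alignment are now filled in)
+ *	radeon_surface_manager_free(surf_man);
+ */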
diff --git a/chromium/third_party/libdrm/src/radeon/radeon_surface.h b/chromium/third_party/libdrm/src/radeon/radeon_surface.h
new file mode 100644
index 00000000000..af7cab67753
--- /dev/null
+++ b/chromium/third_party/libdrm/src/radeon/radeon_surface.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright © 2011 Red Hat All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <jglisse@redhat.com>
+ */
+#ifndef RADEON_SURFACE_H
+#define RADEON_SURFACE_H
+
+/* Note:
+ *
+ * For texture arrays, the n layers are stored one after another within each
+ * mipmap level. A value of 0 is always valid for any field that can be a hint.
+ */
+
+#define RADEON_SURF_MAX_LEVEL 32
+
+#define RADEON_SURF_TYPE_MASK 0xFF
+#define RADEON_SURF_TYPE_SHIFT 0
+#define RADEON_SURF_TYPE_1D 0
+#define RADEON_SURF_TYPE_2D 1
+#define RADEON_SURF_TYPE_3D 2
+#define RADEON_SURF_TYPE_CUBEMAP 3
+#define RADEON_SURF_TYPE_1D_ARRAY 4
+#define RADEON_SURF_TYPE_2D_ARRAY 5
+#define RADEON_SURF_MODE_MASK 0xFF
+#define RADEON_SURF_MODE_SHIFT 8
+#define RADEON_SURF_MODE_LINEAR 0
+#define RADEON_SURF_MODE_LINEAR_ALIGNED 1
+#define RADEON_SURF_MODE_1D 2
+#define RADEON_SURF_MODE_2D 3
+#define RADEON_SURF_SCANOUT (1 << 16)
+#define RADEON_SURF_ZBUFFER (1 << 17)
+#define RADEON_SURF_SBUFFER (1 << 18)
+#define RADEON_SURF_Z_OR_SBUFFER (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)
+#define RADEON_SURF_HAS_SBUFFER_MIPTREE (1 << 19)
+#define RADEON_SURF_HAS_TILE_MODE_INDEX (1 << 20)
+#define RADEON_SURF_FMASK (1 << 21)
+
+#define RADEON_SURF_GET(v, field) (((v) >> RADEON_SURF_ ## field ## _SHIFT) & RADEON_SURF_ ## field ## _MASK)
+#define RADEON_SURF_SET(v, field) (((v) & RADEON_SURF_ ## field ## _MASK) << RADEON_SURF_ ## field ## _SHIFT)
+#define RADEON_SURF_CLR(v, field) ((v) & ~(RADEON_SURF_ ## field ## _MASK << RADEON_SURF_ ## field ## _SHIFT))
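+
+/* For example, RADEON_SURF_GET(flags, MODE) extracts the tiling mode, and
+ * RADEON_SURF_CLR(flags, MODE) | RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE)
+ * rewrites it to 1D, as done throughout radeon_surface.c.
+ */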
+
+/* The first fields, up to mode, need to match the r6 struct so that we can
+ * reuse the same function for linear & linear-aligned surfaces.
+ */
+struct radeon_surface_level {
+ uint64_t offset;
+ uint64_t slice_size;
+ uint32_t npix_x;
+ uint32_t npix_y;
+ uint32_t npix_z;
+ uint32_t nblk_x;
+ uint32_t nblk_y;
+ uint32_t nblk_z;
+ uint32_t pitch_bytes;
+ uint32_t mode;
+};
+
+enum si_tiling_mode {
+ SI_TILING_AUTO = 0,
+
+ SI_TILING_COLOR_1D,
+ SI_TILING_COLOR_1D_SCANOUT,
+ SI_TILING_COLOR_2D_8BPP,
+ SI_TILING_COLOR_2D_16BPP,
+ SI_TILING_COLOR_2D_32BPP,
+ SI_TILING_COLOR_2D_64BPP,
+ SI_TILING_COLOR_2D_SCANOUT_16BPP,
+ SI_TILING_COLOR_2D_SCANOUT_32BPP,
+ SI_TILING_COLOR_LINEAR,
+
+ SI_TILING_STENCIL_1D,
+ SI_TILING_STENCIL_2D,
+ SI_TILING_STENCIL_2D_2AA,
+ SI_TILING_STENCIL_2D_4AA,
+ SI_TILING_STENCIL_2D_8AA,
+
+ SI_TILING_DEPTH_1D,
+ SI_TILING_DEPTH_2D,
+ SI_TILING_DEPTH_2D_2AA,
+ SI_TILING_DEPTH_2D_4AA,
+ SI_TILING_DEPTH_2D_8AA,
+
+ SI_TILING_LAST_MODE,
+};
+
+struct radeon_surface {
+ uint32_t npix_x;
+ uint32_t npix_y;
+ uint32_t npix_z;
+ uint32_t blk_w;
+ uint32_t blk_h;
+ uint32_t blk_d;
+ uint32_t array_size;
+ uint32_t last_level;
+ uint32_t bpe;
+ uint32_t nsamples;
+ uint32_t flags;
+ /* The following fields are updated/filled by the allocator. Callers may
+ * set some of the values, but they are used as hints and can be
+ * overridden (things like bankw/bankh on Evergreen, for instance).
+ */
+ uint64_t bo_size;
+ uint64_t bo_alignment;
+ /* applies to eg (Evergreen) and newer */
+ uint32_t bankw;
+ uint32_t bankh;
+ uint32_t mtilea;
+ uint32_t tile_split;
+ uint32_t stencil_tile_split;
+ uint64_t stencil_offset;
+ struct radeon_surface_level level[RADEON_SURF_MAX_LEVEL];
+ struct radeon_surface_level stencil_level[RADEON_SURF_MAX_LEVEL];
+ uint32_t tiling_index[RADEON_SURF_MAX_LEVEL];
+ uint32_t stencil_tiling_index[RADEON_SURF_MAX_LEVEL];
+};
+
+struct radeon_surface_manager *radeon_surface_manager_new(int fd);
+void radeon_surface_manager_free(struct radeon_surface_manager *surf_man);
+int radeon_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+int radeon_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+
+#endif
diff --git a/chromium/third_party/libdrm/src/rockchip/Android.mk b/chromium/third_party/libdrm/src/rockchip/Android.mk
new file mode 100644
index 00000000000..4569044ea18
--- /dev/null
+++ b/chromium/third_party/libdrm/src/rockchip/Android.mk
@@ -0,0 +1,27 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libdrm_rockchip
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES := rockchip_drm.c
+LOCAL_EXPORT_C_INCLUDE_DIRS += \
+ $(LOCAL_PATH)/rockchip
+
+LOCAL_C_INCLUDES := \
+ $(LIBDRM_TOP) \
+ $(LIBDRM_TOP)/rockchip \
+ $(LIBDRM_TOP)/include/drm
+
+LOCAL_CFLAGS := \
+ -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
+
+LOCAL_COPY_HEADERS := rockchip_drm.h rockchip_drmif.h
+LOCAL_COPY_HEADERS_TO := libdrm
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/chromium/third_party/libdrm/src/rockchip/Makefile.am b/chromium/third_party/libdrm/src/rockchip/Makefile.am
new file mode 100644
index 00000000000..2ebb82f18e2
--- /dev/null
+++ b/chromium/third_party/libdrm/src/rockchip/Makefile.am
@@ -0,0 +1,20 @@
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/rockchip \
+ $(PTHREADSTUBS_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_rockchip_la_LTLIBRARIES = libdrm_rockchip.la
+libdrm_rockchip_ladir = $(libdir)
+libdrm_rockchip_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libdrm_rockchip_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
+
+libdrm_rockchip_la_SOURCES = \
+ rockchip_drm.c
+
+libdrm_rockchipincludedir = ${includedir}/libdrm
+libdrm_rockchipinclude_HEADERS = rockchip_drmif.h rockchip_drm.h
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm_rockchip.pc
diff --git a/chromium/third_party/libdrm/src/rockchip/libdrm_rockchip.pc.in b/chromium/third_party/libdrm/src/rockchip/libdrm_rockchip.pc.in
new file mode 100644
index 00000000000..13f22ac76af
--- /dev/null
+++ b/chromium/third_party/libdrm/src/rockchip/libdrm_rockchip.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_rockchip
+Description: Userspace interface to rockchip kernel DRM services
+Version: 0.1
+Libs: -L${libdir} -ldrm_rockchip
+Cflags: -I${includedir} -I${includedir}/libdrm
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/rockchip/rockchip_drm.c b/chromium/third_party/libdrm/src/rockchip/rockchip_drm.c
new file mode 100644
index 00000000000..9433d52afe1
--- /dev/null
+++ b/chromium/third_party/libdrm/src/rockchip/rockchip_drm.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) ROCKCHIP, Inc.
+ * Author: yzq <yzq@rock-chips.com>
+ *
+ * based on exynos_drm.c
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <sys/mman.h>
+#include <linux/stddef.h>
+
+#include <xf86drm.h>
+
+#include "rockchip_drm.h"
+#include "rockchip_drmif.h"
+
+/*
+ * Create a rockchip drm device object.
+ *
+ * @fd: file descriptor of an opened rockchip drm driver.
+ *
+ * Returns the device object on success, NULL otherwise.
+ */
+struct rockchip_device *rockchip_device_create(int fd)
+{
+ struct rockchip_device *dev;
+
+ dev = calloc(1, sizeof(*dev));
+ if (!dev) {
+ fprintf(stderr, "failed to create device[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ dev->fd = fd;
+
+ return dev;
+}
+
+/*
+ * Destroy a rockchip drm device object.
+ *
+ * @dev: rockchip drm device object.
+ */
+void rockchip_device_destroy(struct rockchip_device *dev)
+{
+ free(dev);
+}
+
+/*
+ * Create a rockchip buffer object for a rockchip drm device.
+ *
+ * @dev: rockchip drm device object.
+ * @size: user-desired size.
+ * @flags: user-desired memory type.
+ *	The user can combine one or more memory allocation and cache
+ *	attribute types; by default, ROCKCHIP_BO_NONCONTIG and
+ *	ROCKCHIP_BO_NONCACHABLE are used.
+ *
+ * Returns a rockchip buffer object on success, NULL otherwise.
+ */
+struct rockchip_bo *rockchip_bo_create(struct rockchip_device *dev,
+ size_t size, uint32_t flags)
+{
+ struct rockchip_bo *bo;
+ struct drm_rockchip_gem_create req = {
+ .size = size,
+ .flags = flags,
+ };
+
+ if (size == 0) {
+ fprintf(stderr, "invalid size.\n");
+ return NULL;
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo) {
+ fprintf(stderr, "failed to create bo[%s].\n",
+ strerror(errno));
+ goto fail;
+ }
+
+ bo->dev = dev;
+
+ if (drmIoctl(dev->fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &req)) {
+ fprintf(stderr, "failed to create gem object[%s].\n",
+ strerror(errno));
+ goto err_free_bo;
+ }
+
+ bo->handle = req.handle;
+ bo->size = size;
+ bo->flags = flags;
+
+ return bo;
+
+err_free_bo:
+ free(bo);
+fail:
+ return NULL;
+}
+
+struct rockchip_bo *rockchip_bo_from_handle(struct rockchip_device *dev,
+ uint32_t handle, uint32_t flags, uint32_t size)
+{
+ struct rockchip_bo *bo;
+
+ if (size == 0) {
+ fprintf(stderr, "invalid size.\n");
+ return NULL;
+ }
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo) {
+ fprintf(stderr, "failed to create bo[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ bo->dev = dev;
+ bo->handle = handle;
+ bo->size = size;
+ bo->flags = flags;
+
+ return bo;
+}
+
+/*
+ * Destroy a rockchip buffer object.
+ *
+ * @bo: a rockchip buffer object to be destroyed.
+ */
+void rockchip_bo_destroy(struct rockchip_bo *bo)
+{
+ if (!bo)
+ return;
+
+ if (bo->vaddr)
+ munmap(bo->vaddr, bo->size);
+
+ if (bo->handle) {
+ struct drm_gem_close req = {
+ .handle = bo->handle,
+ };
+
+ drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ }
+
+ free(bo);
+}
+
+
+/*
+ * Get a rockchip buffer object from a gem global object name.
+ *
+ * @dev: a rockchip device object.
+ * @name: a gem global object name exported by another process.
+ *
+ * this interface is used to get a rockchip buffer object from a gem
+ * global object name sent by another process for buffer sharing.
+ *
+ * Returns a rockchip buffer object on success, NULL otherwise.
+ *
+ */
+struct rockchip_bo *rockchip_bo_from_name(struct rockchip_device *dev,
+ uint32_t name)
+{
+ struct rockchip_bo *bo;
+ struct drm_gem_open req = {
+ .name = name,
+ };
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo) {
+ fprintf(stderr, "failed to allocate bo[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
+ fprintf(stderr, "failed to open gem object[%s].\n",
+ strerror(errno));
+ goto err_free_bo;
+ }
+
+ bo->dev = dev;
+ bo->name = name;
+ bo->handle = req.handle;
+
+ return bo;
+
+err_free_bo:
+ free(bo);
+ return NULL;
+}
+
+/*
+ * Get a gem global object name from a gem object handle.
+ *
+ * @bo: a rockchip buffer object including a gem handle.
+ * @name: out parameter receiving the gem global object name from the
+ *	kernel driver.
+ *
+ * This interface is used to get a gem global object name for a gem object
+ * handle so that the buffer can be shared with another process.
+ *
+ * Returns 0 on success, a negative value otherwise.
+ */
+int rockchip_bo_get_name(struct rockchip_bo *bo, uint32_t *name)
+{
+ if (!bo->name) {
+ struct drm_gem_flink req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
+ if (ret) {
+ fprintf(stderr, "failed to get gem global name[%s].\n",
+ strerror(errno));
+ return ret;
+ }
+
+ bo->name = req.name;
+ }
+
+ *name = bo->name;
+
+ return 0;
+}
+
+uint32_t rockchip_bo_handle(struct rockchip_bo *bo)
+{
+ return bo->handle;
+}
+
+/*
+ * Mmap a buffer into user space.
+ *
+ * @bo: a rockchip buffer object including a gem object handle to be mmapped
+ *	into user space.
+ *
+ * Returns the mapped user-space pointer on success, NULL otherwise.
+ */
+void *rockchip_bo_map(struct rockchip_bo *bo)
+{
+ if (!bo->vaddr) {
+ struct rockchip_device *dev = bo->dev;
+ struct drm_rockchip_gem_map_off req = {
+ .handle = bo->handle,
+ };
+ int ret;
+
+ ret = drmIoctl(dev->fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &req);
+ if (ret) {
+ fprintf(stderr, "failed to ioctl gem map offset[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+
+ bo->vaddr = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, dev->fd, req.offset);
+ if (bo->vaddr == MAP_FAILED) {
+ fprintf(stderr, "failed to mmap buffer[%s].\n",
+ strerror(errno));
+ return NULL;
+ }
+ }
+
+ return bo->vaddr;
+}
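+
+/*
+ * Typical buffer lifecycle (a minimal sketch assuming an already opened
+ * rockchip drm fd; error handling omitted):
+ *
+ *	struct rockchip_device *dev = rockchip_device_create(fd);
+ *	struct rockchip_bo *bo = rockchip_bo_create(dev, 4096, 0);
+ *	void *ptr = rockchip_bo_map(bo);
+ *
+ *	memset(ptr, 0, bo->size);
+ *	rockchip_bo_destroy(bo);
+ *	rockchip_device_destroy(dev);
+ */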
diff --git a/chromium/third_party/libdrm/src/rockchip/rockchip_drm.h b/chromium/third_party/libdrm/src/rockchip/rockchip_drm.h
new file mode 100644
index 00000000000..13977d59975
--- /dev/null
+++ b/chromium/third_party/libdrm/src/rockchip/rockchip_drm.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) Fuzhou Rockchip Electronics Co.Ltd
+ * Authors:
+ * Mark Yao <yzq@rock-chips.com>
+ *
+ * based on exynos_drm.h
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ROCKCHIP_DRM_H_
+#define _ROCKCHIP_DRM_H_
+
+#include <stdint.h>
+#include "drm.h"
+
+/**
+ * User-desired buffer creation information structure.
+ *
+ * @size: user-desired memory allocation size.
+ * - this size value will be page-aligned internally.
+ * @flags: user request for setting memory type or cache attributes.
+ * @handle: a returned handle to the created gem object.
+ * - this handle is set by the gem module on the kernel side.
+ */
+struct drm_rockchip_gem_create {
+ uint64_t size;
+ uint32_t flags;
+ uint32_t handle;
+};
+
+/**
+ * A structure for getting buffer offset.
+ *
+ * @handle: a handle to the created gem object.
+ * @pad: padding to keep the structure 64-bit aligned.
+ * @offset: relative offset of the allocated memory region.
+ * - this value is set by the kernel driver.
+ */
+struct drm_rockchip_gem_map_off {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+};
+
+#define DRM_ROCKCHIP_GEM_CREATE 0x00
+#define DRM_ROCKCHIP_GEM_MAP_OFFSET 0x01
+
+#define DRM_IOCTL_ROCKCHIP_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ROCKCHIP_GEM_CREATE, struct drm_rockchip_gem_create)
+
+#define DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ROCKCHIP_GEM_MAP_OFFSET, struct drm_rockchip_gem_map_off)
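+
+/*
+ * These ioctls are issued through drmIoctl(), e.g.
+ * drmIoctl(fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &req), as done by
+ * rockchip_bo_create() in rockchip_drm.c.
+ */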
+
+#endif
diff --git a/chromium/third_party/libdrm/src/rockchip/rockchip_drmif.h b/chromium/third_party/libdrm/src/rockchip/rockchip_drmif.h
new file mode 100644
index 00000000000..ec3ff843e39
--- /dev/null
+++ b/chromium/third_party/libdrm/src/rockchip/rockchip_drmif.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) ROCKCHIP, Inc.
+ * Author: yzq <yzq@rock-chips.com>
+ *
+ * based on exynos_drmif.h
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef ROCKCHIP_DRMIF_H_
+#define ROCKCHIP_DRMIF_H_
+
+#include <xf86drm.h>
+#include <stdint.h>
+#include "rockchip_drm.h"
+
+struct rockchip_device {
+ int fd;
+};
+
+/*
+ * Rockchip Buffer Object structure.
+ *
+ * @dev: the rockchip device object this buffer belongs to.
+ * @handle: a gem handle to the created gem object.
+ * @flags: memory allocation and cache attribute types.
+ * @size: size of the created buffer.
+ * @vaddr: user space address of the mmapped gem buffer.
+ * @name: a gem global name obtained from a flink request.
+ */
+struct rockchip_bo {
+ struct rockchip_device *dev;
+ uint32_t handle;
+ uint32_t flags;
+ size_t size;
+ void *vaddr;
+ uint32_t name;
+};
+
+/*
+ * device related functions:
+ */
+struct rockchip_device *rockchip_device_create(int fd);
+void rockchip_device_destroy(struct rockchip_device *dev);
+
+/*
+ * buffer-object related functions:
+ */
+struct rockchip_bo *rockchip_bo_create(struct rockchip_device *dev,
+ size_t size, uint32_t flags);
+void rockchip_bo_destroy(struct rockchip_bo *bo);
+struct rockchip_bo *rockchip_bo_from_name(struct rockchip_device *dev,
+ uint32_t name);
+int rockchip_bo_get_name(struct rockchip_bo *bo, uint32_t *name);
+uint32_t rockchip_bo_handle(struct rockchip_bo *bo);
+struct rockchip_bo *rockchip_bo_from_handle(struct rockchip_device *dev,
+ uint32_t handle, uint32_t flags, uint32_t size);
+void *rockchip_bo_map(struct rockchip_bo *bo);
+#endif /* ROCKCHIP_DRMIF_H_ */
diff --git a/chromium/third_party/libdrm/src/tegra/Makefile.am b/chromium/third_party/libdrm/src/tegra/Makefile.am
new file mode 100644
index 00000000000..fb40be55a3b
--- /dev/null
+++ b/chromium/third_party/libdrm/src/tegra/Makefile.am
@@ -0,0 +1,25 @@
+AM_CPPFLAGS = \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/include/drm
+
+AM_CFLAGS = \
+ @PTHREADSTUBS_CFLAGS@ \
+ $(WARN_CFLAGS)
+
+libdrm_tegra_ladir = $(libdir)
+libdrm_tegra_la_LTLIBRARIES = libdrm_tegra.la
+libdrm_tegra_la_LDFLAGS = -version-number 0:0:0 -no-undefined
+libdrm_tegra_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
+
+libdrm_tegra_la_SOURCES = \
+ private.h \
+ tegra.c
+
+libdrm_tegraincludedir = ${includedir}/libdrm
+libdrm_tegrainclude_HEADERS = tegra.h
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libdrm_tegra.pc
+
+TESTS = tegra-symbol-check
+EXTRA_DIST = $(TESTS)
diff --git a/chromium/third_party/libdrm/src/tegra/libdrm_tegra.pc.in b/chromium/third_party/libdrm/src/tegra/libdrm_tegra.pc.in
new file mode 100644
index 00000000000..2e06f49c6fb
--- /dev/null
+++ b/chromium/third_party/libdrm/src/tegra/libdrm_tegra.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_tegra
+Description: Userspace interface to Tegra kernel DRM services
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -ldrm_tegra
+Cflags: -I${includedir} -I${includedir}/libdrm
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/tegra/private.h b/chromium/third_party/libdrm/src/tegra/private.h
new file mode 100644
index 00000000000..bb6c1a51653
--- /dev/null
+++ b/chromium/third_party/libdrm/src/tegra/private.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2012, 2013 Thierry Reding
+ * Copyright © 2013 Erik Faye-Lund
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DRM_TEGRA_PRIVATE_H__
+#define __DRM_TEGRA_PRIVATE_H__ 1
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <libdrm_macros.h>
+#include <xf86atomic.h>
+
+#include "tegra.h"
+
+struct drm_tegra {
+ bool close;
+ int fd;
+};
+
+struct drm_tegra_bo {
+ struct drm_tegra *drm;
+ uint32_t handle;
+ uint32_t offset;
+ uint32_t flags;
+ uint32_t size;
+ atomic_t ref;
+ void *map;
+};
+
+#endif /* __DRM_TEGRA_PRIVATE_H__ */
diff --git a/chromium/third_party/libdrm/src/tegra/tegra-symbol-check b/chromium/third_party/libdrm/src/tegra/tegra-symbol-check
new file mode 100755
index 00000000000..4020831124a
--- /dev/null
+++ b/chromium/third_party/libdrm/src/tegra/tegra-symbol-check
@@ -0,0 +1,30 @@
+#!/bin/bash
+
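+# Usage: ./tegra-symbol-check [path/to/libdrm_tegra.so]
+# (the path defaults to .libs/libdrm_tegra.so via the ${1-...} expansion below)
+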
+# The following symbols (past the first five) are taken from the public headers.
+# A list of the latter should be available in Makefile.sources/LIBDRM_FREEDRENO_H_FILES.
+
+FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_tegra.so} | awk '{print $3}'| while read func; do
+( grep -q "^$func$" || echo $func ) <<EOF
+__bss_start
+_edata
+_end
+_fini
+_init
+drm_tegra_bo_get_flags
+drm_tegra_bo_get_handle
+drm_tegra_bo_get_tiling
+drm_tegra_bo_map
+drm_tegra_bo_new
+drm_tegra_bo_ref
+drm_tegra_bo_set_flags
+drm_tegra_bo_set_tiling
+drm_tegra_bo_unmap
+drm_tegra_bo_unref
+drm_tegra_bo_wrap
+drm_tegra_close
+drm_tegra_new
+EOF
+done)
+
+test ! -n "$FUNCS" || echo $FUNCS
+test ! -n "$FUNCS"
diff --git a/chromium/third_party/libdrm/src/tegra/tegra.c b/chromium/third_party/libdrm/src/tegra/tegra.c
new file mode 100644
index 00000000000..f7dc89ad098
--- /dev/null
+++ b/chromium/third_party/libdrm/src/tegra/tegra.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright © 2012, 2013 Thierry Reding
+ * Copyright © 2013 Erik Faye-Lund
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/mman.h>
+
+#include <xf86drm.h>
+
+#include <tegra_drm.h>
+
+#include "private.h"
+
+static void drm_tegra_bo_free(struct drm_tegra_bo *bo)
+{
+ struct drm_tegra *drm = bo->drm;
+ struct drm_gem_close args;
+
+ if (bo->map)
+ munmap(bo->map, bo->size);
+
+ memset(&args, 0, sizeof(args));
+ args.handle = bo->handle;
+
+ drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &args);
+
+ free(bo);
+}
+
+static int drm_tegra_wrap(struct drm_tegra **drmp, int fd, bool close)
+{
+ struct drm_tegra *drm;
+
+ if (fd < 0 || !drmp)
+ return -EINVAL;
+
+ drm = calloc(1, sizeof(*drm));
+ if (!drm)
+ return -ENOMEM;
+
+ drm->close = close;
+ drm->fd = fd;
+
+ *drmp = drm;
+
+ return 0;
+}
+
+int drm_tegra_new(struct drm_tegra **drmp, int fd)
+{
+ bool supported = false;
+ drmVersionPtr version;
+
+ version = drmGetVersion(fd);
+ if (!version)
+ return -ENOMEM;
+
+ if (!strncmp(version->name, "tegra", version->name_len))
+ supported = true;
+
+ drmFreeVersion(version);
+
+ if (!supported)
+ return -ENOTSUP;
+
+ return drm_tegra_wrap(drmp, fd, false);
+}
+
+void drm_tegra_close(struct drm_tegra *drm)
+{
+ if (!drm)
+ return;
+
+ if (drm->close)
+ close(drm->fd);
+
+ free(drm);
+}
+
+int drm_tegra_bo_new(struct drm_tegra_bo **bop, struct drm_tegra *drm,
+ uint32_t flags, uint32_t size)
+{
+ struct drm_tegra_gem_create args;
+ struct drm_tegra_bo *bo;
+ int err;
+
+ if (!drm || size == 0 || !bop)
+ return -EINVAL;
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo)
+ return -ENOMEM;
+
+ atomic_set(&bo->ref, 1);
+ bo->flags = flags;
+ bo->size = size;
+ bo->drm = drm;
+
+ memset(&args, 0, sizeof(args));
+ args.flags = flags;
+ args.size = size;
+
+ err = drmCommandWriteRead(drm->fd, DRM_TEGRA_GEM_CREATE, &args,
+ sizeof(args));
+ if (err < 0) {
+ err = -errno;
+ free(bo);
+ return err;
+ }
+
+ bo->handle = args.handle;
+
+ *bop = bo;
+
+ return 0;
+}
+
+int drm_tegra_bo_wrap(struct drm_tegra_bo **bop, struct drm_tegra *drm,
+ uint32_t handle, uint32_t flags, uint32_t size)
+{
+ struct drm_tegra_bo *bo;
+
+ if (!drm || !bop)
+ return -EINVAL;
+
+ bo = calloc(1, sizeof(*bo));
+ if (!bo)
+ return -ENOMEM;
+
+ atomic_set(&bo->ref, 1);
+ bo->handle = handle;
+ bo->flags = flags;
+ bo->size = size;
+ bo->drm = drm;
+
+ *bop = bo;
+
+ return 0;
+}
+
+struct drm_tegra_bo *drm_tegra_bo_ref(struct drm_tegra_bo *bo)
+{
+ if (bo)
+ atomic_inc(&bo->ref);
+
+ return bo;
+}
+
+void drm_tegra_bo_unref(struct drm_tegra_bo *bo)
+{
+ if (bo && atomic_dec_and_test(&bo->ref))
+ drm_tegra_bo_free(bo);
+}
+
+int drm_tegra_bo_get_handle(struct drm_tegra_bo *bo, uint32_t *handle)
+{
+ if (!bo || !handle)
+ return -EINVAL;
+
+ *handle = bo->handle;
+
+ return 0;
+}
+
+int drm_tegra_bo_map(struct drm_tegra_bo *bo, void **ptr)
+{
+ struct drm_tegra *drm = bo->drm;
+
+ if (!bo->map) {
+ struct drm_tegra_gem_mmap args;
+ int err;
+
+ memset(&args, 0, sizeof(args));
+ args.handle = bo->handle;
+
+ err = drmCommandWriteRead(drm->fd, DRM_TEGRA_GEM_MMAP, &args,
+ sizeof(args));
+ if (err < 0)
+ return -errno;
+
+ bo->offset = args.offset;
+
+ bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ drm->fd, bo->offset);
+ if (bo->map == MAP_FAILED) {
+ bo->map = NULL;
+ return -errno;
+ }
+ }
+
+ if (ptr)
+ *ptr = bo->map;
+
+ return 0;
+}
+
+int drm_tegra_bo_unmap(struct drm_tegra_bo *bo)
+{
+ if (!bo)
+ return -EINVAL;
+
+ if (!bo->map)
+ return 0;
+
+ if (munmap(bo->map, bo->size))
+ return -errno;
+
+ bo->map = NULL;
+
+ return 0;
+}
+
+int drm_tegra_bo_get_flags(struct drm_tegra_bo *bo, uint32_t *flags)
+{
+ struct drm_tegra_gem_get_flags args;
+ struct drm_tegra *drm;
+ int err;
+
+ if (!bo)
+ return -EINVAL;
+
+ drm = bo->drm;
+
+ memset(&args, 0, sizeof(args));
+ args.handle = bo->handle;
+
+ err = drmCommandWriteRead(drm->fd, DRM_TEGRA_GEM_GET_FLAGS, &args,
+ sizeof(args));
+ if (err < 0)
+ return -errno;
+
+ if (flags)
+ *flags = args.flags;
+
+ return 0;
+}
+
+int drm_tegra_bo_set_flags(struct drm_tegra_bo *bo, uint32_t flags)
+{
+ struct drm_tegra_gem_set_flags args;
+ struct drm_tegra *drm;
+ int err;
+
+ if (!bo)
+ return -EINVAL;
+
+ drm = bo->drm;
+
+ memset(&args, 0, sizeof(args));
+ args.handle = bo->handle;
+ args.flags = flags;
+
+ err = drmCommandWriteRead(drm->fd, DRM_TEGRA_GEM_SET_FLAGS, &args,
+ sizeof(args));
+ if (err < 0)
+ return -errno;
+
+ return 0;
+}
+
+int drm_tegra_bo_get_tiling(struct drm_tegra_bo *bo,
+ struct drm_tegra_bo_tiling *tiling)
+{
+ struct drm_tegra_gem_get_tiling args;
+ struct drm_tegra *drm;
+ int err;
+
+ if (!bo)
+ return -EINVAL;
+
+ drm = bo->drm;
+
+ memset(&args, 0, sizeof(args));
+ args.handle = bo->handle;
+
+ err = drmCommandWriteRead(drm->fd, DRM_TEGRA_GEM_GET_TILING, &args,
+ sizeof(args));
+ if (err < 0)
+ return -errno;
+
+ if (tiling) {
+ tiling->mode = args.mode;
+ tiling->value = args.value;
+ }
+
+ return 0;
+}
+
+int drm_tegra_bo_set_tiling(struct drm_tegra_bo *bo,
+ const struct drm_tegra_bo_tiling *tiling)
+{
+ struct drm_tegra_gem_set_tiling args;
+ struct drm_tegra *drm;
+ int err;
+
+ if (!bo || !tiling)
+ return -EINVAL;
+
+ drm = bo->drm;
+
+ memset(&args, 0, sizeof(args));
+ args.handle = bo->handle;
+ args.mode = tiling->mode;
+ args.value = tiling->value;
+
+ err = drmCommandWriteRead(drm->fd, DRM_TEGRA_GEM_SET_TILING, &args,
+ sizeof(args));
+ if (err < 0)
+ return -errno;
+
+ return 0;
+}
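+
+/*
+ * Typical usage (a minimal sketch assuming an fd opened on a Tegra DRM
+ * node; error handling omitted):
+ *
+ *	struct drm_tegra *drm;
+ *	struct drm_tegra_bo *bo;
+ *	void *ptr;
+ *
+ *	drm_tegra_new(&drm, fd);
+ *	drm_tegra_bo_new(&bo, drm, 0, 4096);
+ *	drm_tegra_bo_map(bo, &ptr);
+ *	drm_tegra_bo_unmap(bo);
+ *	drm_tegra_bo_unref(bo);
+ *	drm_tegra_close(drm);
+ */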
diff --git a/chromium/third_party/libdrm/src/tegra/tegra.h b/chromium/third_party/libdrm/src/tegra/tegra.h
new file mode 100644
index 00000000000..31b0995ad69
--- /dev/null
+++ b/chromium/third_party/libdrm/src/tegra/tegra.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright © 2012, 2013 Thierry Reding
+ * Copyright © 2013 Erik Faye-Lund
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DRM_TEGRA_H__
+#define __DRM_TEGRA_H__ 1
+
+#include <stdint.h>
+#include <stdlib.h>
+
+struct drm_tegra_bo;
+struct drm_tegra;
+
+int drm_tegra_new(struct drm_tegra **drmp, int fd);
+void drm_tegra_close(struct drm_tegra *drm);
+
+int drm_tegra_bo_new(struct drm_tegra_bo **bop, struct drm_tegra *drm,
+ uint32_t flags, uint32_t size);
+int drm_tegra_bo_wrap(struct drm_tegra_bo **bop, struct drm_tegra *drm,
+ uint32_t handle, uint32_t flags, uint32_t size);
+struct drm_tegra_bo *drm_tegra_bo_ref(struct drm_tegra_bo *bo);
+void drm_tegra_bo_unref(struct drm_tegra_bo *bo);
+int drm_tegra_bo_get_handle(struct drm_tegra_bo *bo, uint32_t *handle);
+int drm_tegra_bo_map(struct drm_tegra_bo *bo, void **ptr);
+int drm_tegra_bo_unmap(struct drm_tegra_bo *bo);
+
+int drm_tegra_bo_get_flags(struct drm_tegra_bo *bo, uint32_t *flags);
+int drm_tegra_bo_set_flags(struct drm_tegra_bo *bo, uint32_t flags);
+
+struct drm_tegra_bo_tiling {
+ uint32_t mode;
+ uint32_t value;
+};
+
+int drm_tegra_bo_get_tiling(struct drm_tegra_bo *bo,
+ struct drm_tegra_bo_tiling *tiling);
+int drm_tegra_bo_set_tiling(struct drm_tegra_bo *bo,
+ const struct drm_tegra_bo_tiling *tiling);
+
+#endif /* __DRM_TEGRA_H__ */
diff --git a/chromium/third_party/libdrm/src/util_double_list.h b/chromium/third_party/libdrm/src/util_double_list.h
new file mode 100644
index 00000000000..7e48b26cb0c
--- /dev/null
+++ b/chromium/third_party/libdrm/src/util_double_list.h
@@ -0,0 +1,143 @@
+/*
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+/**
+ * \file
+ * List macros heavily inspired by the Linux kernel
+ * list handling. No list looping yet.
+ *
+ * Is not threadsafe, so common operations need to
+ * be protected using an external mutex.
+ */
+#ifndef _U_DOUBLE_LIST_H_
+#define _U_DOUBLE_LIST_H_
+
+#include <stddef.h>
+
+struct list_head
+{
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+static inline void list_inithead(struct list_head *item)
+{
+ item->prev = item;
+ item->next = item;
+}
+
+static inline void list_add(struct list_head *item, struct list_head *list)
+{
+ item->prev = list;
+ item->next = list->next;
+ list->next->prev = item;
+ list->next = item;
+}
+
+static inline void list_addtail(struct list_head *item, struct list_head *list)
+{
+ item->next = list;
+ item->prev = list->prev;
+ list->prev->next = item;
+ list->prev = item;
+}
+
+static inline void list_replace(struct list_head *from, struct list_head *to)
+{
+ to->prev = from->prev;
+ to->next = from->next;
+ from->next->prev = to;
+ from->prev->next = to;
+}
+
+static inline void list_del(struct list_head *item)
+{
+ item->prev->next = item->next;
+ item->next->prev = item->prev;
+}
+
+static inline void list_delinit(struct list_head *item)
+{
+ item->prev->next = item->next;
+ item->next->prev = item->prev;
+ item->next = item;
+ item->prev = item;
+}
+
+#define LIST_INITHEAD(__item) list_inithead(__item)
+#define LIST_ADD(__item, __list) list_add(__item, __list)
+#define LIST_ADDTAIL(__item, __list) list_addtail(__item, __list)
+#define LIST_REPLACE(__from, __to) list_replace(__from, __to)
+#define LIST_DEL(__item) list_del(__item)
+#define LIST_DELINIT(__item) list_delinit(__item)
+
+#define LIST_ENTRY(__type, __item, __field) \
+ ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
+
+#define LIST_FIRST_ENTRY(__ptr, __type, __field) \
+ LIST_ENTRY(__type, (__ptr)->next, __field)
+
+#define LIST_LAST_ENTRY(__ptr, __type, __field) \
+ LIST_ENTRY(__type, (__ptr)->prev, __field)
+
+#define LIST_IS_EMPTY(__list) \
+ ((__list)->next == (__list))
+
+#ifndef container_of
+#define container_of(ptr, sample, member) \
+ (void *)((char *)(ptr) \
+ - ((char *)&((typeof(sample))0)->member))
+#endif
+
+#define LIST_FOR_EACH_ENTRY(pos, head, member) \
+ for (pos = container_of((head)->next, pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.next, pos, member))
+
+#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \
+ for (pos = container_of((head)->next, pos, member), \
+ storage = container_of(pos->member.next, pos, member); \
+ &pos->member != (head); \
+ pos = storage, storage = container_of(storage->member.next, storage, member))
+
+#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \
+ for (pos = container_of((head)->prev, pos, member), \
+ storage = container_of(pos->member.prev, pos, member); \
+ &pos->member != (head); \
+ pos = storage, storage = container_of(storage->member.prev, storage, member))
+
+#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \
+ for (pos = container_of((start), pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.next, pos, member))
+
+#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \
+ for (pos = container_of((start), pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.prev, pos, member))
+
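+/*
+ * Usage sketch (hypothetical element type): embed a list_head in the
+ * element and name that member in the iteration macros:
+ *
+ *	struct item { int value; struct list_head link; };
+ *
+ *	struct list_head head;
+ *	struct item a, *pos;
+ *
+ *	LIST_INITHEAD(&head);
+ *	LIST_ADDTAIL(&a.link, &head);
+ *	LIST_FOR_EACH_ENTRY(pos, &head, link)
+ *		use(pos->value);
+ */
+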
+#endif /*_U_DOUBLE_LIST_H_*/
diff --git a/chromium/third_party/libdrm/src/util_math.h b/chromium/third_party/libdrm/src/util_math.h
new file mode 100644
index 00000000000..02b15a8e5a9
--- /dev/null
+++ b/chromium/third_party/libdrm/src/util_math.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#ifndef _UTIL_MATH_H_
+#define _UTIL_MATH_H_
+
+#define MIN2( A, B ) ( (A)<(B) ? (A) : (B) )
+#define MAX2( A, B ) ( (A)>(B) ? (A) : (B) )
+#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))
+
+#define __align_mask(value, mask) (((value) + (mask)) & ~(mask))
+#define ALIGN(value, alignment) __align_mask(value, (typeof(value))((alignment) - 1))
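+
+/* For example, ALIGN(13, 8) == 16 and MIN2(3, 5) == 3. The alignment passed
+ * to ALIGN must be a power of two, since it is converted into a bit mask. */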
+
+#endif /*_UTIL_MATH_H_*/
diff --git a/chromium/third_party/libdrm/src/vc4/Makefile.am b/chromium/third_party/libdrm/src/vc4/Makefile.am
new file mode 100644
index 00000000000..7e486b4d8ea
--- /dev/null
+++ b/chromium/third_party/libdrm/src/vc4/Makefile.am
@@ -0,0 +1,34 @@
+# Copyright © 2016 Broadcom
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+include Makefile.sources
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ $(PTHREADSTUBS_CFLAGS) \
+ $(VALGRIND_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_vc4includedir = ${includedir}/libdrm
+libdrm_vc4include_HEADERS = $(LIBDRM_VC4_H_FILES)
+
+pkgconfig_DATA = libdrm_vc4.pc
diff --git a/chromium/third_party/libdrm/src/vc4/Makefile.sources b/chromium/third_party/libdrm/src/vc4/Makefile.sources
new file mode 100644
index 00000000000..8bf97ff1e80
--- /dev/null
+++ b/chromium/third_party/libdrm/src/vc4/Makefile.sources
@@ -0,0 +1,3 @@
+LIBDRM_VC4_H_FILES := \
+ vc4_packet.h \
+ vc4_qpu_defines.h
diff --git a/chromium/third_party/libdrm/src/vc4/libdrm_vc4.pc.in b/chromium/third_party/libdrm/src/vc4/libdrm_vc4.pc.in
new file mode 100644
index 00000000000..a92678ed6c4
--- /dev/null
+++ b/chromium/third_party/libdrm/src/vc4/libdrm_vc4.pc.in
@@ -0,0 +1,9 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_vc4
+Description: Userspace interface to vc4 kernel DRM services
+Version: @PACKAGE_VERSION@
+Requires.private: libdrm
diff --git a/chromium/third_party/libdrm/src/vc4/vc4_packet.h b/chromium/third_party/libdrm/src/vc4/vc4_packet.h
new file mode 100644
index 00000000000..e18e0bdf6d8
--- /dev/null
+++ b/chromium/third_party/libdrm/src/vc4/vc4_packet.h
@@ -0,0 +1,397 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VC4_PACKET_H
+#define VC4_PACKET_H
+
+enum vc4_packet {
+ VC4_PACKET_HALT = 0,
+ VC4_PACKET_NOP = 1,
+
+ VC4_PACKET_FLUSH = 4,
+ VC4_PACKET_FLUSH_ALL = 5,
+ VC4_PACKET_START_TILE_BINNING = 6,
+ VC4_PACKET_INCREMENT_SEMAPHORE = 7,
+ VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
+
+ VC4_PACKET_BRANCH = 16,
+ VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
+ VC4_PACKET_RETURN_FROM_SUB_LIST = 18,
+
+ VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
+ VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
+ VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
+ VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
+ VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
+ VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
+
+ VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
+ VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
+
+ VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
+ VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
+
+ VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
+
+ VC4_PACKET_GL_SHADER_STATE = 64,
+ VC4_PACKET_NV_SHADER_STATE = 65,
+ VC4_PACKET_VG_SHADER_STATE = 66,
+
+ VC4_PACKET_CONFIGURATION_BITS = 96,
+ VC4_PACKET_FLAT_SHADE_FLAGS = 97,
+ VC4_PACKET_POINT_SIZE = 98,
+ VC4_PACKET_LINE_WIDTH = 99,
+ VC4_PACKET_RHT_X_BOUNDARY = 100,
+ VC4_PACKET_DEPTH_OFFSET = 101,
+ VC4_PACKET_CLIP_WINDOW = 102,
+ VC4_PACKET_VIEWPORT_OFFSET = 103,
+ VC4_PACKET_Z_CLIPPING = 104,
+ VC4_PACKET_CLIPPER_XY_SCALING = 105,
+ VC4_PACKET_CLIPPER_Z_SCALING = 106,
+
+ VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
+ VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
+ VC4_PACKET_CLEAR_COLORS = 114,
+ VC4_PACKET_TILE_COORDINATES = 115,
+
+ /* Not an actual hardware packet -- this is what we use to put
+ * references to GEM bos in the command stream, since we need the u32
+ * in the actual address packet in order to store the offset from the
+ * start of the BO.
+ */
+ VC4_PACKET_GEM_HANDLES = 254,
+} __attribute__ ((__packed__));
+
+#define VC4_PACKET_HALT_SIZE 1
+#define VC4_PACKET_NOP_SIZE 1
+#define VC4_PACKET_FLUSH_SIZE 1
+#define VC4_PACKET_FLUSH_ALL_SIZE 1
+#define VC4_PACKET_START_TILE_BINNING_SIZE 1
+#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
+#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
+#define VC4_PACKET_BRANCH_SIZE 5
+#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
+#define VC4_PACKET_RETURN_FROM_SUB_LIST_SIZE 1
+#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
+#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
+#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE 5
+#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE 5
+#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
+#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
+#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
+#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
+#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE 1
+#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE 1
+#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
+#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
+#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
+#define VC4_PACKET_VG_SHADER_STATE_SIZE 5
+#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
+#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
+#define VC4_PACKET_POINT_SIZE_SIZE 5
+#define VC4_PACKET_LINE_WIDTH_SIZE 5
+#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
+#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
+#define VC4_PACKET_CLIP_WINDOW_SIZE 9
+#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
+#define VC4_PACKET_Z_CLIPPING_SIZE 9
+#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
+#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
+#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
+#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
+#define VC4_PACKET_CLEAR_COLORS_SIZE 14
+#define VC4_PACKET_TILE_COORDINATES_SIZE 3
+#define VC4_PACKET_GEM_HANDLES_SIZE 9
+
+#define VC4_MASK(high, low) (((1 << ((high) - (low) + 1)) - 1) << (low))
+/* Using the GNU statement expression extension */
+#define VC4_SET_FIELD(value, field) \
+ ({ \
+ uint32_t fieldval = (value) << field ## _SHIFT; \
+ assert((fieldval & ~ field ## _MASK) == 0); \
+ fieldval & field ## _MASK; \
+ })
+
+#define VC4_GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
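+
+/* Usage sketch: packing and unpacking a field with one of the *_MASK/*_SHIFT
+ * pairs defined below (any pair works the same way):
+ *
+ *   uint32_t w = VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_BGR565,
+ *                              VC4_LOADSTORE_TILE_BUFFER_FORMAT);
+ *   assert(VC4_GET_FIELD(w, VC4_LOADSTORE_TILE_BUFFER_FORMAT) ==
+ *          VC4_LOADSTORE_TILE_BUFFER_BGR565);
+ *
+ * VC4_SET_FIELD() asserts at runtime that the value fits in the field.
+ */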
+
+/** @{
+ * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
+ */
+#define VC4_TILING_FORMAT_LINEAR 0
+#define VC4_TILING_FORMAT_T 1
+#define VC4_TILING_FORMAT_LT 2
+/** @} */
+
+/** @{
+ *
+ * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
+ * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
+ */
+#define VC4_LOADSTORE_FULL_RES_EOF (1 << 3)
+#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL (1 << 2)
+#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS (1 << 1)
+#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR (1 << 0)
+/** @} */
+
+/** @{
+ *
+ * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
+ */
+
+#define VC4_LOADSTORE_TILE_BUFFER_EOF (1 << 3)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK (1 << 2)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS (1 << 1)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR (1 << 0)
+
+/** @} */
+
+/** @{
+ *
+ * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
+ */
+#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR (1 << 15)
+#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR (1 << 14)
+#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR (1 << 13)
+#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP (1 << 12)
+
+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
+#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
+#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
+#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
+/** @} */
+
+/** @{
+ *
+ * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
+ */
+#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
+#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
+#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
+
+/** The values of the field are VC4_TILING_FORMAT_* */
+#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
+#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
+
+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
+#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
+#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
+#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
+#define VC4_LOADSTORE_TILE_BUFFER_Z 3
+#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
+#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
+/** @} */
+
+#define VC4_INDEX_BUFFER_U8 (0 << 4)
+#define VC4_INDEX_BUFFER_U16 (1 << 4)
+
+/* This flag is only present in NV shader state. */
+#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS (1 << 3)
+#define VC4_SHADER_FLAG_ENABLE_CLIPPING (1 << 2)
+#define VC4_SHADER_FLAG_VS_POINT_SIZE (1 << 1)
+#define VC4_SHADER_FLAG_FS_SINGLE_THREAD (1 << 0)
+
+/** @{ byte 2 of config bits. */
+#define VC4_CONFIG_BITS_EARLY_Z_UPDATE (1 << 1)
+#define VC4_CONFIG_BITS_EARLY_Z (1 << 0)
+/** @} */
+
+/** @{ byte 1 of config bits. */
+#define VC4_CONFIG_BITS_Z_UPDATE (1 << 7)
+/** same values in this 3-bit field as PIPE_FUNC_* */
+#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
+#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE (1 << 3)
+
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
+
+#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT (1 << 0)
+/** @} */
+
+/** @{ byte 0 of config bits. */
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6)
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6)
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_MASK (3 << 6)
+
+#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES (1 << 4)
+#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET (1 << 3)
+#define VC4_CONFIG_BITS_CW_PRIMITIVES (1 << 2)
+#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK (1 << 1)
+#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT (1 << 0)
+/** @} */
+
+/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
+#define VC4_BIN_CONFIG_DB_NON_MS (1 << 7)
+
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5)
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3
+
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3)
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3
+
+#define VC4_BIN_CONFIG_AUTO_INIT_TSDA (1 << 2)
+#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT (1 << 1)
+#define VC4_BIN_CONFIG_MS_MODE_4X (1 << 0)
+/** @} */
+
+/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
+#define VC4_RENDER_CONFIG_DB_NON_MS (1 << 12)
+#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE (1 << 11)
+#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G (1 << 10)
+#define VC4_RENDER_CONFIG_COVERAGE_MODE (1 << 9)
+#define VC4_RENDER_CONFIG_ENABLE_VG_MASK (1 << 8)
+
+/** The values of the field are VC4_TILING_FORMAT_* */
+#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6)
+#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6
+
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4)
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4)
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4)
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_MASK (3 << 4)
+
+#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2)
+#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2
+#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0
+#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1
+#define VC4_RENDER_CONFIG_FORMAT_BGR565 2
+
+#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT (1 << 1)
+#define VC4_RENDER_CONFIG_MS_MODE_4X (1 << 0)
+/** @} */
+
+#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4)
+#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0)
+
+enum vc4_texture_data_type {
+ VC4_TEXTURE_TYPE_RGBA8888 = 0,
+ VC4_TEXTURE_TYPE_RGBX8888 = 1,
+ VC4_TEXTURE_TYPE_RGBA4444 = 2,
+ VC4_TEXTURE_TYPE_RGBA5551 = 3,
+ VC4_TEXTURE_TYPE_RGB565 = 4,
+ VC4_TEXTURE_TYPE_LUMINANCE = 5,
+ VC4_TEXTURE_TYPE_ALPHA = 6,
+ VC4_TEXTURE_TYPE_LUMALPHA = 7,
+ VC4_TEXTURE_TYPE_ETC1 = 8,
+ VC4_TEXTURE_TYPE_S16F = 9,
+ VC4_TEXTURE_TYPE_S8 = 10,
+ VC4_TEXTURE_TYPE_S16 = 11,
+ VC4_TEXTURE_TYPE_BW1 = 12,
+ VC4_TEXTURE_TYPE_A4 = 13,
+ VC4_TEXTURE_TYPE_A1 = 14,
+ VC4_TEXTURE_TYPE_RGBA64 = 15,
+ VC4_TEXTURE_TYPE_RGBA32R = 16,
+ VC4_TEXTURE_TYPE_YUV422R = 17,
+};
+
+#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12)
+#define VC4_TEX_P0_OFFSET_SHIFT 12
+#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10)
+#define VC4_TEX_P0_CSWIZ_SHIFT 10
+#define VC4_TEX_P0_CMMODE_MASK VC4_MASK(9, 9)
+#define VC4_TEX_P0_CMMODE_SHIFT 9
+#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8)
+#define VC4_TEX_P0_FLIPY_SHIFT 8
+#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4)
+#define VC4_TEX_P0_TYPE_SHIFT 4
+#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0)
+#define VC4_TEX_P0_MIPLVLS_SHIFT 0
+
+#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31)
+#define VC4_TEX_P1_TYPE4_SHIFT 31
+#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20)
+#define VC4_TEX_P1_HEIGHT_SHIFT 20
+#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19)
+#define VC4_TEX_P1_ETCFLIP_SHIFT 19
+#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8)
+#define VC4_TEX_P1_WIDTH_SHIFT 8
+
+#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7)
+#define VC4_TEX_P1_MAGFILT_SHIFT 7
+# define VC4_TEX_P1_MAGFILT_LINEAR 0
+# define VC4_TEX_P1_MAGFILT_NEAREST 1
+
+#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4)
+#define VC4_TEX_P1_MINFILT_SHIFT 4
+# define VC4_TEX_P1_MINFILT_LINEAR 0
+# define VC4_TEX_P1_MINFILT_NEAREST 1
+# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2
+# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3
+# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4
+# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5
+
+#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2)
+#define VC4_TEX_P1_WRAP_T_SHIFT 2
+#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0)
+#define VC4_TEX_P1_WRAP_S_SHIFT 0
+# define VC4_TEX_P1_WRAP_REPEAT 0
+# define VC4_TEX_P1_WRAP_CLAMP 1
+# define VC4_TEX_P1_WRAP_MIRROR 2
+# define VC4_TEX_P1_WRAP_BORDER 3
+
+#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30)
+#define VC4_TEX_P2_PTYPE_SHIFT 30
+# define VC4_TEX_P2_PTYPE_IGNORED 0
+# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1
+# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2
+# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3
+
+/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
+#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12)
+#define VC4_TEX_P2_CMST_SHIFT 12
+#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0)
+#define VC4_TEX_P2_BSLOD_SHIFT 0
+
+/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
+#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12)
+#define VC4_TEX_P2_CHEIGHT_SHIFT 12
+#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0)
+#define VC4_TEX_P2_CWIDTH_SHIFT 0
+
+/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
+#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12)
+#define VC4_TEX_P2_CYOFF_SHIFT 12
+#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0)
+#define VC4_TEX_P2_CXOFF_SHIFT 0
+
+#endif /* VC4_PACKET_H */
diff --git a/chromium/third_party/libdrm/src/vc4/vc4_qpu_defines.h b/chromium/third_party/libdrm/src/vc4/vc4_qpu_defines.h
new file mode 100644
index 00000000000..26fcf505fe4
--- /dev/null
+++ b/chromium/third_party/libdrm/src/vc4/vc4_qpu_defines.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VC4_QPU_DEFINES_H
+#define VC4_QPU_DEFINES_H
+
+enum qpu_op_add {
+ QPU_A_NOP,
+ QPU_A_FADD,
+ QPU_A_FSUB,
+ QPU_A_FMIN,
+ QPU_A_FMAX,
+ QPU_A_FMINABS,
+ QPU_A_FMAXABS,
+ QPU_A_FTOI,
+ QPU_A_ITOF,
+ QPU_A_ADD = 12,
+ QPU_A_SUB,
+ QPU_A_SHR,
+ QPU_A_ASR,
+ QPU_A_ROR,
+ QPU_A_SHL,
+ QPU_A_MIN,
+ QPU_A_MAX,
+ QPU_A_AND,
+ QPU_A_OR,
+ QPU_A_XOR,
+ QPU_A_NOT,
+ QPU_A_CLZ,
+ QPU_A_V8ADDS = 30,
+ QPU_A_V8SUBS = 31,
+};
+
+enum qpu_op_mul {
+ QPU_M_NOP,
+ QPU_M_FMUL,
+ QPU_M_MUL24,
+ QPU_M_V8MULD,
+ QPU_M_V8MIN,
+ QPU_M_V8MAX,
+ QPU_M_V8ADDS,
+ QPU_M_V8SUBS,
+};
+
+enum qpu_raddr {
+ QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */
+ /* 0-31 are the plain regfile a or b fields */
+ QPU_R_UNIF = 32,
+ QPU_R_VARY = 35,
+ QPU_R_ELEM_QPU = 38,
+ QPU_R_NOP,
+ QPU_R_XY_PIXEL_COORD = 41,
+ QPU_R_MS_REV_FLAGS = 42,
+ QPU_R_VPM = 48,
+ QPU_R_VPM_LD_BUSY,
+ QPU_R_VPM_LD_WAIT,
+ QPU_R_MUTEX_ACQUIRE,
+};
+
+enum qpu_waddr {
+ /* 0-31 are the plain regfile a or b fields */
+ QPU_W_ACC0 = 32, /* aka r0 */
+ QPU_W_ACC1,
+ QPU_W_ACC2,
+ QPU_W_ACC3,
+ QPU_W_TMU_NOSWAP,
+ QPU_W_ACC5,
+ QPU_W_HOST_INT,
+ QPU_W_NOP,
+ QPU_W_UNIFORMS_ADDRESS,
+ QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */
+ QPU_W_MS_FLAGS = 42,
+ QPU_W_REV_FLAG = 42,
+ QPU_W_TLB_STENCIL_SETUP = 43,
+ QPU_W_TLB_Z,
+ QPU_W_TLB_COLOR_MS,
+ QPU_W_TLB_COLOR_ALL,
+ QPU_W_TLB_ALPHA_MASK,
+ QPU_W_VPM,
+ QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */
+ QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */
+ QPU_W_MUTEX_RELEASE,
+ QPU_W_SFU_RECIP,
+ QPU_W_SFU_RECIPSQRT,
+ QPU_W_SFU_EXP,
+ QPU_W_SFU_LOG,
+ QPU_W_TMU0_S,
+ QPU_W_TMU0_T,
+ QPU_W_TMU0_R,
+ QPU_W_TMU0_B,
+ QPU_W_TMU1_S,
+ QPU_W_TMU1_T,
+ QPU_W_TMU1_R,
+ QPU_W_TMU1_B,
+};
+
+enum qpu_sig_bits {
+ QPU_SIG_SW_BREAKPOINT,
+ QPU_SIG_NONE,
+ QPU_SIG_THREAD_SWITCH,
+ QPU_SIG_PROG_END,
+ QPU_SIG_WAIT_FOR_SCOREBOARD,
+ QPU_SIG_SCOREBOARD_UNLOCK,
+ QPU_SIG_LAST_THREAD_SWITCH,
+ QPU_SIG_COVERAGE_LOAD,
+ QPU_SIG_COLOR_LOAD,
+ QPU_SIG_COLOR_LOAD_END,
+ QPU_SIG_LOAD_TMU0,
+ QPU_SIG_LOAD_TMU1,
+ QPU_SIG_ALPHA_MASK_LOAD,
+ QPU_SIG_SMALL_IMM,
+ QPU_SIG_LOAD_IMM,
+ QPU_SIG_BRANCH
+};
+
+enum qpu_mux {
+ /* hardware mux values */
+ QPU_MUX_R0,
+ QPU_MUX_R1,
+ QPU_MUX_R2,
+ QPU_MUX_R3,
+ QPU_MUX_R4,
+ QPU_MUX_R5,
+ QPU_MUX_A,
+ QPU_MUX_B,
+
+ /**
+ * Non-hardware mux value, stores a small immediate field to be
+ * programmed into raddr_b in the qpu_reg.index.
+ */
+ QPU_MUX_SMALL_IMM,
+};
+
+enum qpu_cond {
+ QPU_COND_NEVER,
+ QPU_COND_ALWAYS,
+ QPU_COND_ZS,
+ QPU_COND_ZC,
+ QPU_COND_NS,
+ QPU_COND_NC,
+ QPU_COND_CS,
+ QPU_COND_CC,
+};
+
+enum qpu_pack_mul {
+ QPU_PACK_MUL_NOP,
+ QPU_PACK_MUL_8888 = 3, /* replicated to each 8 bits of the 32-bit dst. */
+ QPU_PACK_MUL_8A,
+ QPU_PACK_MUL_8B,
+ QPU_PACK_MUL_8C,
+ QPU_PACK_MUL_8D,
+};
+
+enum qpu_pack_a {
+ QPU_PACK_A_NOP,
+ /* convert to 16 bit float if float input, or to int16. */
+ QPU_PACK_A_16A,
+ QPU_PACK_A_16B,
+ /* replicated to each 8 bits of the 32-bit dst. */
+ QPU_PACK_A_8888,
+ /* Convert to 8-bit unsigned int. */
+ QPU_PACK_A_8A,
+ QPU_PACK_A_8B,
+ QPU_PACK_A_8C,
+ QPU_PACK_A_8D,
+
+ /* Saturating variants of the previous instructions. */
+ QPU_PACK_A_32_SAT, /* int-only */
+ QPU_PACK_A_16A_SAT, /* int or float */
+ QPU_PACK_A_16B_SAT,
+ QPU_PACK_A_8888_SAT,
+ QPU_PACK_A_8A_SAT,
+ QPU_PACK_A_8B_SAT,
+ QPU_PACK_A_8C_SAT,
+ QPU_PACK_A_8D_SAT,
+};
+
+enum qpu_unpack {
+ QPU_UNPACK_NOP,
+ QPU_UNPACK_16A,
+ QPU_UNPACK_16B,
+ QPU_UNPACK_8D_REP,
+ QPU_UNPACK_8A,
+ QPU_UNPACK_8B,
+ QPU_UNPACK_8C,
+ QPU_UNPACK_8D,
+};
+
+#define QPU_MASK(high, low) ((((uint64_t)1<<((high)-(low)+1))-1)<<(low))
+/* Using the GNU statement expression extension */
+#define QPU_SET_FIELD(value, field) \
+ ({ \
+ uint64_t fieldval = (uint64_t)(value) << field ## _SHIFT; \
+ assert((fieldval & ~ field ## _MASK) == 0); \
+ fieldval & field ## _MASK; \
+ })
+
+#define QPU_GET_FIELD(word, field) ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))
+
+#define QPU_UPDATE_FIELD(inst, value, field) \
+ (((inst) & ~(field ## _MASK)) | QPU_SET_FIELD(value, field))
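+
+/* Usage sketch: QPU_UPDATE_FIELD() performs a read-modify-write of a single
+ * field in a 64-bit instruction word, leaving all other bits intact:
+ *
+ *   uint64_t inst = 0;
+ *   inst = QPU_UPDATE_FIELD(inst, QPU_SIG_NONE, QPU_SIG);
+ *   inst = QPU_UPDATE_FIELD(inst, QPU_COND_ALWAYS, QPU_COND_ADD);
+ *   assert(QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_NONE);
+ */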
+
+#define QPU_SIG_SHIFT 60
+#define QPU_SIG_MASK QPU_MASK(63, 60)
+
+#define QPU_UNPACK_SHIFT 57
+#define QPU_UNPACK_MASK QPU_MASK(59, 57)
+
+/**
+ * If set, the pack field means PACK_MUL or R4 packing, instead of normal
+ * regfile a packing.
+ */
+#define QPU_PM ((uint64_t)1 << 56)
+
+#define QPU_PACK_SHIFT 52
+#define QPU_PACK_MASK QPU_MASK(55, 52)
+
+#define QPU_COND_ADD_SHIFT 49
+#define QPU_COND_ADD_MASK QPU_MASK(51, 49)
+#define QPU_COND_MUL_SHIFT 46
+#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
+
+#define QPU_SF ((uint64_t)1 << 45)
+
+#define QPU_WADDR_ADD_SHIFT 38
+#define QPU_WADDR_ADD_MASK QPU_MASK(43, 38)
+#define QPU_WADDR_MUL_SHIFT 32
+#define QPU_WADDR_MUL_MASK QPU_MASK(37, 32)
+
+#define QPU_OP_MUL_SHIFT 29
+#define QPU_OP_MUL_MASK QPU_MASK(31, 29)
+
+#define QPU_RADDR_A_SHIFT 18
+#define QPU_RADDR_A_MASK QPU_MASK(23, 18)
+#define QPU_RADDR_B_SHIFT 12
+#define QPU_RADDR_B_MASK QPU_MASK(17, 12)
+#define QPU_SMALL_IMM_SHIFT 12
+#define QPU_SMALL_IMM_MASK QPU_MASK(17, 12)
+
+#define QPU_ADD_A_SHIFT 9
+#define QPU_ADD_A_MASK QPU_MASK(11, 9)
+#define QPU_ADD_B_SHIFT 6
+#define QPU_ADD_B_MASK QPU_MASK(8, 6)
+#define QPU_MUL_A_SHIFT 3
+#define QPU_MUL_A_MASK QPU_MASK(5, 3)
+#define QPU_MUL_B_SHIFT 0
+#define QPU_MUL_B_MASK QPU_MASK(2, 0)
+
+#define QPU_WS ((uint64_t)1 << 44)
+
+#define QPU_OP_ADD_SHIFT 24
+#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
+
+#endif /* VC4_QPU_DEFINES_H */
diff --git a/chromium/third_party/libdrm/src/xf86atomic.h b/chromium/third_party/libdrm/src/xf86atomic.h
new file mode 100644
index 00000000000..922b37da625
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86atomic.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+/**
+ * @file xf86atomic.h
+ *
+ * Private definitions for atomic operations
+ */
+
+#ifndef LIBDRM_ATOMICS_H
+#define LIBDRM_ATOMICS_H
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#if HAVE_LIBDRM_ATOMIC_PRIMITIVES
+
+#define HAS_ATOMIC_OPS 1
+
+typedef struct {
+ int atomic;
+} atomic_t;
+
+# define atomic_read(x) ((x)->atomic)
+# define atomic_set(x, val) ((x)->atomic = (val))
+# define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
+# define atomic_inc_return(x) (__sync_add_and_fetch (&(x)->atomic, 1))
+# define atomic_dec_and_test(x) (__sync_add_and_fetch (&(x)->atomic, -1) == 0)
+# define atomic_add(x, v) ((void) __sync_add_and_fetch(&(x)->atomic, (v)))
+# define atomic_dec(x, v) ((void) __sync_sub_and_fetch(&(x)->atomic, (v)))
+# define atomic_cmpxchg(x, oldv, newv) __sync_val_compare_and_swap (&(x)->atomic, oldv, newv)
+
+#endif
+
+#if HAVE_LIB_ATOMIC_OPS
+#include <atomic_ops.h>
+
+#define HAS_ATOMIC_OPS 1
+
+typedef struct {
+ AO_t atomic;
+} atomic_t;
+
+# define atomic_read(x) AO_load_full(&(x)->atomic)
+# define atomic_set(x, val) AO_store_full(&(x)->atomic, (val))
+# define atomic_inc(x) ((void) AO_fetch_and_add1_full(&(x)->atomic))
+# define atomic_inc_return(x) (AO_fetch_and_add1_full(&(x)->atomic) + 1)
+# define atomic_add(x, v) ((void) AO_fetch_and_add_full(&(x)->atomic, (v)))
+# define atomic_dec(x, v) ((void) AO_fetch_and_add_full(&(x)->atomic, -(v)))
+# define atomic_dec_and_test(x) (AO_fetch_and_sub1_full(&(x)->atomic) == 1)
+# define atomic_cmpxchg(x, oldv, newv) AO_compare_and_swap_full(&(x)->atomic, oldv, newv)
+
+#endif
+
+#if (defined(__sun) || defined(__NetBSD__)) && !defined(HAS_ATOMIC_OPS) /* Solaris & OpenSolaris & NetBSD */
+
+#include <sys/atomic.h>
+#define HAS_ATOMIC_OPS 1
+
+#if defined(__NetBSD__)
+#define LIBDRM_ATOMIC_TYPE int
+#else
+#define LIBDRM_ATOMIC_TYPE uint_t
+#endif
+
+typedef struct { LIBDRM_ATOMIC_TYPE atomic; } atomic_t;
+
+# define atomic_read(x) (int) ((x)->atomic)
+# define atomic_set(x, val) ((x)->atomic = (LIBDRM_ATOMIC_TYPE)(val))
+# define atomic_inc(x) (atomic_inc_uint (&(x)->atomic))
+# define atomic_inc_return(x) (atomic_inc_uint_nv(&(x)->atomic))
+# define atomic_dec_and_test(x) (atomic_dec_uint_nv(&(x)->atomic) == 0)
+# define atomic_add(x, v) (atomic_add_int(&(x)->atomic, (v)))
+# define atomic_dec(x, v) (atomic_add_int(&(x)->atomic, -(v)))
+# define atomic_cmpxchg(x, oldv, newv) atomic_cas_uint (&(x)->atomic, oldv, newv)
+
+#endif
+
+#if ! HAS_ATOMIC_OPS
+#error libdrm requires atomic operations, please define them for your CPU/compiler.
+#endif
+
+static inline int atomic_add_unless(atomic_t *v, int add, int unless)
+{
+ int c, old;
+ c = atomic_read(v);
+ while (c != unless && (old = atomic_cmpxchg(v, c, c + add)) != c)
+ c = old;
+ return c == unless;
+}
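+
+/* Usage sketch: atomic_add_unless() returns nonzero iff the counter already
+ * equalled 'unless' (in which case nothing was added), so the typical
+ * "drop a reference unless it is the last one" pattern looks like this;
+ * the bo->refcount field here is hypothetical:
+ *
+ *   if (atomic_add_unless(&bo->refcount, -1, 1)) {
+ *       // refcount was 1 and is untouched: take the slow path that
+ *       // drops the last reference under a lock
+ *   }
+ */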
+
+#endif
diff --git a/chromium/third_party/libdrm/src/xf86drm.c b/chromium/third_party/libdrm/src/xf86drm.c
new file mode 100644
index 00000000000..9b52889e4ce
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86drm.c
@@ -0,0 +1,3307 @@
+/**
+ * \file xf86drm.c
+ * User-level interface to DRM device
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Kevin E. Martin <martin@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#include <dirent.h>
+#include <stddef.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <limits.h>
+#include <signal.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#define stat_t struct stat
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <stdarg.h>
+#ifdef MAJOR_IN_MKDEV
+#include <sys/mkdev.h>
+#endif
+#ifdef MAJOR_IN_SYSMACROS
+#include <sys/sysmacros.h>
+#endif
+#include <math.h>
+
+/* Not all systems have MAP_FAILED defined */
+#ifndef MAP_FAILED
+#define MAP_FAILED ((void *)-1)
+#endif
+
+#include "xf86drm.h"
+#include "libdrm_macros.h"
+
+#include "util_math.h"
+
+#ifdef __OpenBSD__
+#define DRM_PRIMARY_MINOR_NAME "drm"
+#define DRM_CONTROL_MINOR_NAME "drmC"
+#define DRM_RENDER_MINOR_NAME "drmR"
+#else
+#define DRM_PRIMARY_MINOR_NAME "card"
+#define DRM_CONTROL_MINOR_NAME "controlD"
+#define DRM_RENDER_MINOR_NAME "renderD"
+#endif
+
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
+#define DRM_MAJOR 145
+#endif
+
+#ifdef __NetBSD__
+#define DRM_MAJOR 34
+#endif
+
+#ifdef __OpenBSD__
+#ifdef __i386__
+#define DRM_MAJOR 88
+#else
+#define DRM_MAJOR 87
+#endif
+#endif /* __OpenBSD__ */
+
+#ifndef DRM_MAJOR
+#define DRM_MAJOR 226 /* Linux */
+#endif
+
+#define DRM_MSG_VERBOSITY 3
+
+#define memclear(s) memset(&s, 0, sizeof(s))
+
+static drmServerInfoPtr drm_server_info;
+
+void drmSetServerInfo(drmServerInfoPtr info)
+{
+ drm_server_info = info;
+}
+
+/**
+ * Output a message to stderr.
+ *
+ * \param format printf() like format string.
+ *
+ * \internal
+ * This function is a wrapper around vfprintf().
+ */
+
+static int DRM_PRINTFLIKE(1, 0)
+drmDebugPrint(const char *format, va_list ap)
+{
+ return vfprintf(stderr, format, ap);
+}
+
+void
+drmMsg(const char *format, ...)
+{
+ va_list ap;
+ const char *env;
+ if (((env = getenv("LIBGL_DEBUG")) && strstr(env, "verbose")) ||
+ (drm_server_info && drm_server_info->debug_print))
+ {
+ va_start(ap, format);
+ if (drm_server_info) {
+ drm_server_info->debug_print(format,ap);
+ } else {
+ drmDebugPrint(format, ap);
+ }
+ va_end(ap);
+ }
+}
+
+static void *drmHashTable = NULL; /* Context switch callbacks */
+
+void *drmGetHashTable(void)
+{
+ return drmHashTable;
+}
+
+void *drmMalloc(int size)
+{
+ return calloc(1, size);
+}
+
+void drmFree(void *pt)
+{
+ free(pt);
+}
+
+/**
+ * Call ioctl, restarting if it is interrupted
+ */
+int
+drmIoctl(int fd, unsigned long request, void *arg)
+{
+ int ret;
+
+ do {
+ ret = ioctl(fd, request, arg);
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+ return ret;
+}
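+
+/* Usage sketch: ioctl traffic in this file goes through drmIoctl() so that
+ * transient EINTR/EAGAIN failures are retried transparently:
+ *
+ *   drm_version_t v;
+ *   memclear(v);
+ *   if (drmIoctl(fd, DRM_IOCTL_VERSION, &v))
+ *       return -errno;
+ */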
+
+static unsigned long drmGetKeyFromFd(int fd)
+{
+ stat_t st;
+
+ st.st_rdev = 0;
+ fstat(fd, &st);
+ return st.st_rdev;
+}
+
+drmHashEntry *drmGetEntry(int fd)
+{
+ unsigned long key = drmGetKeyFromFd(fd);
+ void *value;
+ drmHashEntry *entry;
+
+ if (!drmHashTable)
+ drmHashTable = drmHashCreate();
+
+ if (drmHashLookup(drmHashTable, key, &value)) {
+ entry = drmMalloc(sizeof(*entry));
+ entry->fd = fd;
+ entry->f = NULL;
+ entry->tagTable = drmHashCreate();
+ drmHashInsert(drmHashTable, key, entry);
+ } else {
+ entry = value;
+ }
+ return entry;
+}
+
+/**
+ * Compare two busid strings
+ *
+ * \param first
+ * \param second
+ *
+ * \return 1 if matched.
+ *
+ * \internal
+ * This function compares two bus ID strings. It understands the older
+ * PCI:b:d:f format and the newer pci:oooo:bb:dd.f format. In these formats,
+ * o is the domain, b the bus, d the device, and f the function.
+ */
+static int drmMatchBusID(const char *id1, const char *id2, int pci_domain_ok)
+{
+ /* First, check if the IDs are exactly the same */
+ if (strcasecmp(id1, id2) == 0)
+ return 1;
+
+ /* Try to match old/new-style PCI bus IDs. */
+ if (strncasecmp(id1, "pci", 3) == 0) {
+ unsigned int o1, b1, d1, f1;
+ unsigned int o2, b2, d2, f2;
+ int ret;
+
+ ret = sscanf(id1, "pci:%04x:%02x:%02x.%u", &o1, &b1, &d1, &f1);
+ if (ret != 4) {
+ o1 = 0;
+ ret = sscanf(id1, "PCI:%u:%u:%u", &b1, &d1, &f1);
+ if (ret != 3)
+ return 0;
+ }
+
+ ret = sscanf(id2, "pci:%04x:%02x:%02x.%u", &o2, &b2, &d2, &f2);
+ if (ret != 4) {
+ o2 = 0;
+ ret = sscanf(id2, "PCI:%u:%u:%u", &b2, &d2, &f2);
+ if (ret != 3)
+ return 0;
+ }
+
+ /* If domains aren't properly supported by the kernel interface,
+ * just ignore them, which sucks less than picking a totally random
+ * card with "open by name"
+ */
+ if (!pci_domain_ok)
+ o1 = o2 = 0;
+
+ if ((o1 != o2) || (b1 != b2) || (d1 != d2) || (f1 != f2))
+ return 0;
+ else
+ return 1;
+ }
+ return 0;
+}
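+
+/* For illustration, with pci_domain_ok set the following IDs match, since
+ * the old PCI:b:d:f form carries an implicit domain of 0:
+ *
+ *   "pci:0000:01:00.0"  matches  "PCI:1:0:0"
+ */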
+
+/**
+ * Handles error checking for chown call.
+ *
+ * \param path path to the file.
+ * \param owner id of the new owner.
+ * \param group id of the new group.
+ *
+ * \return zero if success or -1 if failure.
+ *
+ * \internal
+ * Checks for failure. If the failure was caused by a signal, chown is
+ * called again. Any other failure is reported with an error message
+ * via drmMsg().
+ */
+#if !defined(UDEV)
+static int chown_check_return(const char *path, uid_t owner, gid_t group)
+{
+ int rv;
+
+ do {
+ rv = chown(path, owner, group);
+ } while (rv != 0 && errno == EINTR);
+
+ if (rv == 0)
+ return 0;
+
+ drmMsg("Failed to change owner or group for file %s! %d: %s\n",
+ path, errno, strerror(errno));
+ return -1;
+}
+#endif
+
+/**
+ * Open the DRM device, creating it if necessary.
+ *
+ * \param dev major and minor numbers of the device.
+ * \param minor minor number of the device.
+ *
+ * \return a file descriptor on success, or a negative value on error.
+ *
+ * \internal
+ * Assembles the device name from \p minor and opens it, creating the device
+ * special file node (and its parent directory, if necessary) with the major
+ * and minor numbers specified by \p dev, provided the caller is root.
+ */
+static int drmOpenDevice(dev_t dev, int minor, int type)
+{
+ stat_t st;
+ const char *dev_name;
+ char buf[64];
+ int fd;
+ mode_t devmode = DRM_DEV_MODE, serv_mode;
+ gid_t serv_group;
+#if !defined(UDEV)
+ int isroot = !geteuid();
+ uid_t user = DRM_DEV_UID;
+ gid_t group = DRM_DEV_GID;
+#endif
+
+ switch (type) {
+ case DRM_NODE_PRIMARY:
+ dev_name = DRM_DEV_NAME;
+ break;
+ case DRM_NODE_CONTROL:
+ dev_name = DRM_CONTROL_DEV_NAME;
+ break;
+ case DRM_NODE_RENDER:
+ dev_name = DRM_RENDER_DEV_NAME;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ sprintf(buf, dev_name, DRM_DIR_NAME, minor);
+ drmMsg("drmOpenDevice: node name is %s\n", buf);
+
+ if (drm_server_info && drm_server_info->get_perms) {
+ drm_server_info->get_perms(&serv_group, &serv_mode);
+ devmode = serv_mode ? serv_mode : DRM_DEV_MODE;
+ devmode &= ~(S_IXUSR|S_IXGRP|S_IXOTH);
+ }
+
+#if !defined(UDEV)
+ if (stat(DRM_DIR_NAME, &st)) {
+ if (!isroot)
+ return DRM_ERR_NOT_ROOT;
+ mkdir(DRM_DIR_NAME, DRM_DEV_DIRMODE);
+ chown_check_return(DRM_DIR_NAME, 0, 0); /* root:root */
+ chmod(DRM_DIR_NAME, DRM_DEV_DIRMODE);
+ }
+
+ /* Check if the device node exists and create it if necessary. */
+ if (stat(buf, &st)) {
+ if (!isroot)
+ return DRM_ERR_NOT_ROOT;
+ remove(buf);
+ mknod(buf, S_IFCHR | devmode, dev);
+ }
+
+ if (drm_server_info && drm_server_info->get_perms) {
+ group = ((int)serv_group >= 0) ? serv_group : DRM_DEV_GID;
+ chown_check_return(buf, user, group);
+ chmod(buf, devmode);
+ }
+#else
+ /* if we modprobed then wait for udev */
+ {
+ int udev_count = 0;
+wait_for_udev:
+ if (stat(DRM_DIR_NAME, &st)) {
+ usleep(20);
+ udev_count++;
+
+ if (udev_count == 50)
+ return -1;
+ goto wait_for_udev;
+ }
+
+ if (stat(buf, &st)) {
+ usleep(20);
+ udev_count++;
+
+ if (udev_count == 50)
+ return -1;
+ goto wait_for_udev;
+ }
+ }
+#endif
+
+ fd = open(buf, O_RDWR, 0);
+ drmMsg("drmOpenDevice: open result is %d, (%s)\n",
+ fd, fd < 0 ? strerror(errno) : "OK");
+ if (fd >= 0)
+ return fd;
+
+#if !defined(UDEV)
+ /* If the device node is not what we expect it to be, recreate it and
+ * try again.
+ */
+ if (st.st_rdev != dev) {
+ if (!isroot)
+ return DRM_ERR_NOT_ROOT;
+ remove(buf);
+ mknod(buf, S_IFCHR | devmode, dev);
+ if (drm_server_info && drm_server_info->get_perms) {
+ chown_check_return(buf, user, group);
+ chmod(buf, devmode);
+ }
+ }
+ fd = open(buf, O_RDWR, 0);
+ drmMsg("drmOpenDevice: open result is %d, (%s)\n",
+ fd, fd < 0 ? strerror(errno) : "OK");
+ if (fd >= 0)
+ return fd;
+
+ drmMsg("drmOpenDevice: Open failed\n");
+ remove(buf);
+#endif
+ return -errno;
+}
+
+
+/**
+ * Open the DRM device
+ *
+ * \param minor device minor number.
+ * \param create allows creating the device if set.
+ *
+ * \return a file descriptor on success, or a negative value on error.
+ *
+ * \internal
+ * Calls drmOpenDevice() if \p create is set, otherwise assembles the device
+ * name from \p minor and opens it.
+ */
+static int drmOpenMinor(int minor, int create, int type)
+{
+ int fd;
+ char buf[64];
+ const char *dev_name;
+
+ if (create)
+ return drmOpenDevice(makedev(DRM_MAJOR, minor), minor, type);
+
+ switch (type) {
+ case DRM_NODE_PRIMARY:
+ dev_name = DRM_DEV_NAME;
+ break;
+ case DRM_NODE_CONTROL:
+ dev_name = DRM_CONTROL_DEV_NAME;
+ break;
+ case DRM_NODE_RENDER:
+ dev_name = DRM_RENDER_DEV_NAME;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ sprintf(buf, dev_name, DRM_DIR_NAME, minor);
+ if ((fd = open(buf, O_RDWR, 0)) >= 0)
+ return fd;
+ return -errno;
+}
+
+
+/**
+ * Determine whether the DRM kernel driver has been loaded.
+ *
+ * \return 1 if the DRM driver is loaded, 0 otherwise.
+ *
+ * \internal
+ * Determine the presence of the kernel driver by attempting to open minor 0
+ * and get version information. For backward compatibility with older
+ * Linux implementations, /proc/dri is also checked.
+ */
+int drmAvailable(void)
+{
+ drmVersionPtr version;
+ int retval = 0;
+ int fd;
+
+ if ((fd = drmOpenMinor(0, 1, DRM_NODE_PRIMARY)) < 0) {
+#ifdef __linux__
+ /* Try proc for backward Linux compatibility */
+ if (!access("/proc/dri/0", R_OK))
+ return 1;
+#endif
+ return 0;
+ }
+
+ if ((version = drmGetVersion(fd))) {
+ retval = 1;
+ drmFreeVersion(version);
+ }
+ close(fd);
+
+ return retval;
+}
+
+static int drmGetMinorBase(int type)
+{
+ switch (type) {
+ case DRM_NODE_PRIMARY:
+ return 0;
+ case DRM_NODE_CONTROL:
+ return 64;
+ case DRM_NODE_RENDER:
+ return 128;
+ default:
+ return -1;
+ };
+}
+
+static int drmGetMinorType(int minor)
+{
+ int type = minor >> 6;
+
+ if (minor < 0)
+ return -1;
+
+ switch (type) {
+ case DRM_NODE_PRIMARY:
+ case DRM_NODE_CONTROL:
+ case DRM_NODE_RENDER:
+ return type;
+ default:
+ return -1;
+ }
+}
+
+static const char *drmGetMinorName(int type)
+{
+ switch (type) {
+ case DRM_NODE_PRIMARY:
+ return DRM_PRIMARY_MINOR_NAME;
+ case DRM_NODE_CONTROL:
+ return DRM_CONTROL_MINOR_NAME;
+ case DRM_NODE_RENDER:
+ return DRM_RENDER_MINOR_NAME;
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * Open the device by bus ID.
+ *
+ * \param busid bus ID.
+ * \param type device node type.
+ *
+ * \return a file descriptor on success, or a negative value on error.
+ *
+ * \internal
+ * This function attempts to open every possible minor (up to DRM_MAX_MINOR),
+ * comparing the device bus ID with the one supplied.
+ *
+ * \sa drmOpenMinor() and drmGetBusid().
+ */
+static int drmOpenByBusid(const char *busid, int type)
+{
+ int i, pci_domain_ok = 1;
+ int fd;
+ const char *buf;
+ drmSetVersion sv;
+ int base = drmGetMinorBase(type);
+
+ if (base < 0)
+ return -1;
+
+ drmMsg("drmOpenByBusid: Searching for BusID %s\n", busid);
+ for (i = base; i < base + DRM_MAX_MINOR; i++) {
+ fd = drmOpenMinor(i, 1, type);
+ drmMsg("drmOpenByBusid: drmOpenMinor returns %d\n", fd);
+ if (fd >= 0) {
+ /* We need to try for 1.4 first for proper PCI domain support
+ * and if that fails, we know the kernel is busted
+ */
+ sv.drm_di_major = 1;
+ sv.drm_di_minor = 4;
+ sv.drm_dd_major = -1; /* Don't care */
+ sv.drm_dd_minor = -1; /* Don't care */
+ if (drmSetInterfaceVersion(fd, &sv)) {
+#ifndef __alpha__
+ pci_domain_ok = 0;
+#endif
+ sv.drm_di_major = 1;
+ sv.drm_di_minor = 1;
+ sv.drm_dd_major = -1; /* Don't care */
+ sv.drm_dd_minor = -1; /* Don't care */
+ drmMsg("drmOpenByBusid: Interface 1.4 failed, trying 1.1\n");
+ drmSetInterfaceVersion(fd, &sv);
+ }
+ buf = drmGetBusid(fd);
+ drmMsg("drmOpenByBusid: drmGetBusid reports %s\n", buf);
+ if (buf && drmMatchBusID(buf, busid, pci_domain_ok)) {
+ drmFreeBusid(buf);
+ return fd;
+ }
+ if (buf)
+ drmFreeBusid(buf);
+ close(fd);
+ }
+ }
+ return -1;
+}
+
+
+/**
+ * Open the device by name.
+ *
+ * \param name driver name.
+ * \param type the device node type.
+ *
+ * \return a file descriptor on success, or a negative value on error.
+ *
+ * \internal
+ * This function opens the first minor number that matches the driver name and
+ * isn't already in use. If it's in use, it will already have a bus ID
+ * assigned.
+ *
+ * \sa drmOpenMinor(), drmGetVersion() and drmGetBusid().
+ */
+static int drmOpenByName(const char *name, int type)
+{
+ int i;
+ int fd;
+ drmVersionPtr version;
+ char * id;
+ int base = drmGetMinorBase(type);
+
+ if (base < 0)
+ return -1;
+
+ /*
+ * Open the first minor number that matches the driver name and isn't
+ * already in use. If it's in use it will have a busid assigned already.
+ */
+ for (i = base; i < base + DRM_MAX_MINOR; i++) {
+ if ((fd = drmOpenMinor(i, 1, type)) >= 0) {
+ if ((version = drmGetVersion(fd))) {
+ if (!strcmp(version->name, name)) {
+ drmFreeVersion(version);
+ id = drmGetBusid(fd);
+ drmMsg("drmGetBusid returned '%s'\n", id ? id : "NULL");
+ if (!id || !*id) {
+ if (id)
+ drmFreeBusid(id);
+ return fd;
+ } else {
+ drmFreeBusid(id);
+ }
+ } else {
+ drmFreeVersion(version);
+ }
+ }
+ close(fd);
+ }
+ }
+
+#ifdef __linux__
+ /* Backward-compatibility /proc support */
+ for (i = 0; i < 8; i++) {
+ char proc_name[64], buf[512];
+ char *driver, *pt, *devstring;
+ int retcode;
+
+ sprintf(proc_name, "/proc/dri/%d/name", i);
+ if ((fd = open(proc_name, 0, 0)) >= 0) {
+ retcode = read(fd, buf, sizeof(buf)-1);
+ close(fd);
+ if (retcode) {
+ buf[retcode-1] = '\0';
+ for (driver = pt = buf; *pt && *pt != ' '; ++pt)
+ ;
+ if (*pt) { /* Device is next */
+ *pt = '\0';
+ if (!strcmp(driver, name)) { /* Match */
+ for (devstring = ++pt; *pt && *pt != ' '; ++pt)
+ ;
+ if (*pt) { /* Found busid */
+ return drmOpenByBusid(++pt, type);
+ } else { /* No busid */
+ return drmOpenDevice(strtol(devstring, NULL, 0),i, type);
+ }
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ return -1;
+}
+
+
+/**
+ * Open the DRM device.
+ *
+ * Looks up the specified name and bus ID, and opens the device found. The
+ * entry in /dev/dri is created if necessary and if called by root.
+ *
+ * \param name driver name. Not referenced if bus ID is supplied.
+ * \param busid bus ID. Zero if not known.
+ *
+ * \return a file descriptor on success, or a negative value on error.
+ *
+ * \internal
+ * It calls drmOpenByBusid() if \p busid is specified or drmOpenByName()
+ * otherwise.
+ */
+int drmOpen(const char *name, const char *busid)
+{
+ return drmOpenWithType(name, busid, DRM_NODE_PRIMARY);
+}
+
+/**
+ * Open the DRM device with specified type.
+ *
+ * Looks up the specified name and bus ID, and opens the device found. The
+ * entry in /dev/dri is created if necessary and if called by root.
+ *
+ * \param name driver name. Not referenced if bus ID is supplied.
+ * \param busid bus ID. Zero if not known.
+ * \param type the device node type to open, PRIMARY, CONTROL or RENDER
+ *
+ * \return a file descriptor on success, or a negative value on error.
+ *
+ * \internal
+ * It calls drmOpenByBusid() if \p busid is specified or drmOpenByName()
+ * otherwise.
+ */
+int drmOpenWithType(const char *name, const char *busid, int type)
+{
+ if (!drmAvailable() && name != NULL && drm_server_info &&
+ drm_server_info->load_module) {
+ /* try to load the kernel module */
+ if (!drm_server_info->load_module(name)) {
+ drmMsg("[drm] failed to load kernel module \"%s\"\n", name);
+ return -1;
+ }
+ }
+
+ if (busid) {
+ int fd = drmOpenByBusid(busid, type);
+ if (fd >= 0)
+ return fd;
+ }
+
+ if (name)
+ return drmOpenByName(name, type);
+
+ return -1;
+}
+
+int drmOpenControl(int minor)
+{
+ return drmOpenMinor(minor, 0, DRM_NODE_CONTROL);
+}
+
+int drmOpenRender(int minor)
+{
+ return drmOpenMinor(minor, 0, DRM_NODE_RENDER);
+}
+
+/**
+ * Free the version information returned by drmGetVersion().
+ *
+ * \param v pointer to the version information.
+ *
+ * \internal
+ * It frees the memory pointed to by \p %v as well as all the non-null string
+ * pointers in it.
+ */
+void drmFreeVersion(drmVersionPtr v)
+{
+ if (!v)
+ return;
+ drmFree(v->name);
+ drmFree(v->date);
+ drmFree(v->desc);
+ drmFree(v);
+}
+
+
+/**
+ * Free the non-public version information returned by the kernel.
+ *
+ * \param v pointer to the version information.
+ *
+ * \internal
+ * Used by drmGetVersion() to free the memory pointed to by \p %v as well as
+ * all the non-null string pointers in it.
+ */
+static void drmFreeKernelVersion(drm_version_t *v)
+{
+ if (!v)
+ return;
+ drmFree(v->name);
+ drmFree(v->date);
+ drmFree(v->desc);
+ drmFree(v);
+}
+
+
+/**
+ * Copy version information.
+ *
+ * \param d destination pointer.
+ * \param s source pointer.
+ *
+ * \internal
+ * Used by drmGetVersion() to translate the information returned by the ioctl
+ * interface in a private structure into the public structure counterpart.
+ */
+static void drmCopyVersion(drmVersionPtr d, const drm_version_t *s)
+{
+ d->version_major = s->version_major;
+ d->version_minor = s->version_minor;
+ d->version_patchlevel = s->version_patchlevel;
+ d->name_len = s->name_len;
+ d->name = strdup(s->name);
+ d->date_len = s->date_len;
+ d->date = strdup(s->date);
+ d->desc_len = s->desc_len;
+ d->desc = strdup(s->desc);
+}
+
+
+/**
+ * Query the driver version information.
+ *
+ * \param fd file descriptor.
+ *
+ * \return pointer to a drmVersion structure which should be freed with
+ * drmFreeVersion().
+ *
+ * \note Similar information is available via /proc/dri.
+ *
+ * \internal
+ * It gets the version information via successive DRM_IOCTL_VERSION ioctls,
+ * first with zeros to get the string lengths, and then the actual strings.
+ * It also null-terminates them, since they might not be null-terminated already.
+ */
+drmVersionPtr drmGetVersion(int fd)
+{
+ drmVersionPtr retval;
+ drm_version_t *version = drmMalloc(sizeof(*version));
+
+ memclear(*version);
+
+ if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
+ drmFreeKernelVersion(version);
+ return NULL;
+ }
+
+ if (version->name_len)
+ version->name = drmMalloc(version->name_len + 1);
+ if (version->date_len)
+ version->date = drmMalloc(version->date_len + 1);
+ if (version->desc_len)
+ version->desc = drmMalloc(version->desc_len + 1);
+
+ if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
+ drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno));
+ drmFreeKernelVersion(version);
+ return NULL;
+ }
+
+ /* The results might not be null-terminated strings, so terminate them. */
+ if (version->name_len) version->name[version->name_len] = '\0';
+ if (version->date_len) version->date[version->date_len] = '\0';
+ if (version->desc_len) version->desc[version->desc_len] = '\0';
+
+ retval = drmMalloc(sizeof(*retval));
+ drmCopyVersion(retval, version);
+ drmFreeKernelVersion(version);
+ return retval;
+}
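+
+/* Usage sketch: the usual pattern for identifying the driver behind a file
+ * descriptor:
+ *
+ *   drmVersionPtr ver = drmGetVersion(fd);
+ *   if (ver) {
+ *       printf("driver %s (%d.%d.%d)\n", ver->name, ver->version_major,
+ *              ver->version_minor, ver->version_patchlevel);
+ *       drmFreeVersion(ver);
+ *   }
+ */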
+
+
+/**
+ * Get version information for the DRM user space library.
+ *
+ * This version number is driver independent.
+ *
+ * \param fd file descriptor.
+ *
+ * \return version information.
+ *
+ * \internal
+ * This function allocates and fills a drm_version structure with a hard-coded
+ * version number.
+ */
+drmVersionPtr drmGetLibVersion(int fd)
+{
+ drm_version_t *version = drmMalloc(sizeof(*version));
+
+ /* Version history:
+ * NOTE THIS MUST NOT GO ABOVE VERSION 1.X due to drivers needing it
+ * revision 1.0.x = original DRM interface with no drmGetLibVersion
+ * entry point and many drm<Device> extensions
+ * revision 1.1.x = added drmCommand entry points for device extensions
+ * added drmGetLibVersion to identify libdrm.a version
+ * revision 1.2.x = added drmSetInterfaceVersion
+ * modified drmOpen to handle both busid and name
+ * revision 1.3.x = added server + memory manager
+ */
+ version->version_major = 1;
+ version->version_minor = 3;
+ version->version_patchlevel = 0;
+
+ return (drmVersionPtr)version;
+}
+
+int drmGetCap(int fd, uint64_t capability, uint64_t *value)
+{
+ struct drm_get_cap cap;
+ int ret;
+
+ memclear(cap);
+ cap.capability = capability;
+
+ ret = drmIoctl(fd, DRM_IOCTL_GET_CAP, &cap);
+ if (ret)
+ return ret;
+
+ *value = cap.value;
+ return 0;
+}
+
+int drmSetClientCap(int fd, uint64_t capability, uint64_t value)
+{
+ struct drm_set_client_cap cap;
+
+ memclear(cap);
+ cap.capability = capability;
+ cap.value = value;
+
+ return drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
+}
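+
+/* Usage sketch: query a capability, then opt into a client capability;
+ * DRM_CAP_DUMB_BUFFER and DRM_CLIENT_CAP_UNIVERSAL_PLANES come from the
+ * kernel's drm.h:
+ *
+ *   uint64_t has_dumb = 0;
+ *   if (drmGetCap(fd, DRM_CAP_DUMB_BUFFER, &has_dumb) == 0 && has_dumb)
+ *       drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
+ */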
+
+/**
+ * Free the bus ID information.
+ *
+ * \param busid bus ID information string as given by drmGetBusid().
+ *
+ * \internal
+ * This function just frees the memory pointed to by \p busid.
+ */
+void drmFreeBusid(const char *busid)
+{
+ drmFree((void *)busid);
+}
+
+
+/**
+ * Get the bus ID of the device.
+ *
+ * \param fd file descriptor.
+ *
+ * \return bus ID string.
+ *
+ * \internal
+ * This function gets the bus ID via successive DRM_IOCTL_GET_UNIQUE ioctls to
+ * get the string length and data, passing the arguments in a drm_unique
+ * structure.
+ */
+char *drmGetBusid(int fd)
+{
+ drm_unique_t u;
+
+ memclear(u);
+
+ if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+ return NULL;
+ u.unique = drmMalloc(u.unique_len + 1);
+ if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) {
+ drmFree(u.unique); /* don't leak the buffer if the second ioctl fails */
+ return NULL;
+ }
+ u.unique[u.unique_len] = '\0';
+
+ return u.unique;
+}
+
+
+/**
+ * Set the bus ID of the device.
+ *
+ * \param fd file descriptor.
+ * \param busid bus ID string.
+ *
+ * \return zero on success, negative on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_SET_UNIQUE ioctl, passing
+ * the arguments in a drm_unique structure.
+ */
+int drmSetBusid(int fd, const char *busid)
+{
+ drm_unique_t u;
+
+ memclear(u);
+ u.unique = (char *)busid;
+ u.unique_len = strlen(busid);
+
+ if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
+ return -errno;
+ }
+ return 0;
+}
+
+int drmGetMagic(int fd, drm_magic_t * magic)
+{
+ drm_auth_t auth;
+
+ memclear(auth);
+
+ *magic = 0;
+ if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
+ return -errno;
+ *magic = auth.magic;
+ return 0;
+}
+
+int drmAuthMagic(int fd, drm_magic_t magic)
+{
+ drm_auth_t auth;
+
+ memclear(auth);
+ auth.magic = magic;
+ if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
+ return -errno;
+ return 0;
+}
+
+/**
+ * Specifies a range of memory that is available for mapping by a
+ * non-root process.
+ *
+ * \param fd file descriptor.
+ * \param offset usually the physical address. The actual meaning depends on
+ * the \p type parameter. See below.
+ * \param size of the memory in bytes.
+ * \param type type of the memory to be mapped.
+ * \param flags combination of several flags to modify the function actions.
+ * \param handle will be set to a value that may be used as the offset
+ * parameter for mmap().
+ *
+ * \return zero on success or a negative value on error.
+ *
+ * \par Mapping the frame buffer
+ * For the frame buffer
+ * - \p offset will be the physical address of the start of the frame buffer,
+ * - \p size will be the size of the frame buffer in bytes, and
+ * - \p type will be DRM_FRAME_BUFFER.
+ *
+ * \par
+ * The area mapped will be uncached. If MTRR support is available in the
+ * kernel, the frame buffer area will be set to write combining.
+ *
+ * \par Mapping the MMIO register area
+ * For the MMIO register area,
+ * - \p offset will be the physical address of the start of the register area,
+ * - \p size will be the size of the register area in bytes, and
+ * - \p type will be DRM_REGISTERS.
+ * \par
+ * The area mapped will be uncached.
+ *
+ * \par Mapping the SAREA
+ * For the SAREA,
+ * - \p offset will be ignored and should be set to zero,
+ * - \p size will be the desired size of the SAREA in bytes,
+ * - \p type will be DRM_SHM.
+ *
+ * \par
+ * A shared memory area of the requested size will be created and locked in
+ * kernel memory. This area may be mapped into client-space by using the handle
+ * returned.
+ *
+ * \note May only be called by root.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_ADD_MAP ioctl, passing
+ * the arguments in a drm_map structure.
+ */
+int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
+ drmMapFlags flags, drm_handle_t *handle)
+{
+ drm_map_t map;
+
+ memclear(map);
+ map.offset = offset;
+ map.size = size;
+ map.type = type;
+ map.flags = flags;
+ if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
+ return -errno;
+ if (handle)
+ *handle = (drm_handle_t)(uintptr_t)map.handle;
+ return 0;
+}
+
+int drmRmMap(int fd, drm_handle_t handle)
+{
+ drm_map_t map;
+
+ memclear(map);
+ map.handle = (void *)(uintptr_t)handle;
+
+ if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map))
+ return -errno;
+ return 0;
+}
+
+/**
+ * Make buffers available for DMA transfers.
+ *
+ * \param fd file descriptor.
+ * \param count number of buffers.
+ * \param size size of each buffer.
+ * \param flags buffer allocation flags.
+ * \param agp_offset offset in the AGP aperture
+ *
+ * \return number of buffers allocated, negative on error.
+ *
+ * \internal
+ * This function is a wrapper around DRM_IOCTL_ADD_BUFS ioctl.
+ *
+ * \sa drm_buf_desc.
+ */
+int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
+ int agp_offset)
+{
+ drm_buf_desc_t request;
+
+ memclear(request);
+ request.count = count;
+ request.size = size;
+ request.flags = flags;
+ request.agp_start = agp_offset;
+
+ if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request))
+ return -errno;
+ return request.count;
+}
+
+int drmMarkBufs(int fd, double low, double high)
+{
+ drm_buf_info_t info;
+ int i;
+
+ memclear(info);
+
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
+ return -EINVAL;
+
+ if (!info.count)
+ return -EINVAL;
+
+ if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
+ return -ENOMEM;
+
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
+ int retval = -errno;
+ drmFree(info.list);
+ return retval;
+ }
+
+ for (i = 0; i < info.count; i++) {
+ info.list[i].low_mark = low * info.list[i].count;
+ info.list[i].high_mark = high * info.list[i].count;
+ if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
+ int retval = -errno;
+ drmFree(info.list);
+ return retval;
+ }
+ }
+ drmFree(info.list);
+
+ return 0;
+}
+
+/**
+ * Free buffers.
+ *
+ * \param fd file descriptor.
+ * \param count number of buffers to free.
+ * \param list list of buffers to be freed.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \note This function is primarily used for debugging.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_FREE_BUFS ioctl, passing
+ * the arguments in a drm_buf_free structure.
+ */
+int drmFreeBufs(int fd, int count, int *list)
+{
+ drm_buf_free_t request;
+
+ memclear(request);
+ request.count = count;
+ request.list = list;
+ if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request))
+ return -errno;
+ return 0;
+}
+
+
+/**
+ * Close the device.
+ *
+ * \param fd file descriptor.
+ *
+ * \internal
+ * This function closes the file descriptor.
+ */
+int drmClose(int fd)
+{
+ unsigned long key = drmGetKeyFromFd(fd);
+ drmHashEntry *entry = drmGetEntry(fd);
+
+ drmHashDestroy(entry->tagTable);
+ entry->fd = 0;
+ entry->f = NULL;
+ entry->tagTable = NULL;
+
+ drmHashDelete(drmHashTable, key);
+ drmFree(entry);
+
+ return close(fd);
+}
+
+
+/**
+ * Map a region of memory.
+ *
+ * \param fd file descriptor.
+ * \param handle handle returned by drmAddMap().
+ * \param size size in bytes. Must match the size used by drmAddMap().
+ * \param address will contain the user-space virtual address where the mapping
+ * begins.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper for mmap().
+ */
+int drmMap(int fd, drm_handle_t handle, drmSize size, drmAddressPtr address)
+{
+ static unsigned long pagesize_mask = 0;
+
+ if (fd < 0)
+ return -EINVAL;
+
+ if (!pagesize_mask)
+ pagesize_mask = getpagesize() - 1;
+
+ size = (size + pagesize_mask) & ~pagesize_mask;
+
+ *address = drm_mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, handle);
+ if (*address == MAP_FAILED)
+ return -errno;
+ return 0;
+}
+
+
+/**
+ * Unmap mappings obtained with drmMap().
+ *
+ * \param address address as given by drmMap().
+ * \param size size in bytes. Must match the size used by drmMap().
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper for munmap().
+ */
+int drmUnmap(drmAddress address, drmSize size)
+{
+ return drm_munmap(address, size);
+}
+
+drmBufInfoPtr drmGetBufInfo(int fd)
+{
+ drm_buf_info_t info;
+ drmBufInfoPtr retval;
+ int i;
+
+ memclear(info);
+
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
+ return NULL;
+
+ if (info.count) {
+ if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
+ return NULL;
+
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
+ drmFree(info.list);
+ return NULL;
+ }
+
+ retval = drmMalloc(sizeof(*retval));
+ retval->count = info.count;
+ retval->list = drmMalloc(info.count * sizeof(*retval->list));
+ for (i = 0; i < info.count; i++) {
+ retval->list[i].count = info.list[i].count;
+ retval->list[i].size = info.list[i].size;
+ retval->list[i].low_mark = info.list[i].low_mark;
+ retval->list[i].high_mark = info.list[i].high_mark;
+ }
+ drmFree(info.list);
+ return retval;
+ }
+ return NULL;
+}
+
+/**
+ * Map all DMA buffers into client-virtual space.
+ *
+ * \param fd file descriptor.
+ *
+ * \return a pointer to a ::drmBufMap structure.
+ *
+ * \note The client may not use these buffers until obtaining buffer indices
+ * with drmDMA().
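+ *
+ * \par Example
+ * A minimal map/unmap sketch:
+ * \code
+ * drmBufMapPtr bufs = drmMapBufs(fd);
+ *
+ * if (bufs) {
+ *     // ... reserve indices with drmDMA() and fill bufs->list[i] ...
+ *     drmUnmapBufs(bufs);
+ * }
+ * \endcode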
+ *
+ * \internal
+ * This function calls the DRM_IOCTL_MAP_BUFS ioctl and copies the returned
+ * information about the buffers in a drm_buf_map structure into the
+ * client-visible data structures.
+ */
+drmBufMapPtr drmMapBufs(int fd)
+{
+ drm_buf_map_t bufs;
+ drmBufMapPtr retval;
+ int i;
+
+ memclear(bufs);
+ if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
+ return NULL;
+
+ if (!bufs.count)
+ return NULL;
+
+ if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list))))
+ return NULL;
+
+ if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
+ drmFree(bufs.list);
+ return NULL;
+ }
+
+ retval = drmMalloc(sizeof(*retval));
+ retval->count = bufs.count;
+ retval->list = drmMalloc(bufs.count * sizeof(*retval->list));
+ for (i = 0; i < bufs.count; i++) {
+ retval->list[i].idx = bufs.list[i].idx;
+ retval->list[i].total = bufs.list[i].total;
+ retval->list[i].used = 0;
+ retval->list[i].address = bufs.list[i].address;
+ }
+
+ drmFree(bufs.list);
+ return retval;
+}
+
+
+/**
+ * Unmap buffers allocated with drmMapBufs().
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * Calls munmap() for every buffer stored in \p bufs and frees the
+ * memory allocated by drmMapBufs().
+ */
+int drmUnmapBufs(drmBufMapPtr bufs)
+{
+ int i;
+
+ for (i = 0; i < bufs->count; i++) {
+ drm_munmap(bufs->list[i].address, bufs->list[i].total);
+ }
+
+ drmFree(bufs->list);
+ drmFree(bufs);
+ return 0;
+}
+
+
+#define DRM_DMA_RETRY 16
+
+/**
+ * Reserve DMA buffers.
+ *
+ * \param fd file descriptor.
+ * \param request pointer to a drmDMAReq structure describing the transfer.
+ *
+ * \return zero on success, or a negative value on failure.
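+ *
+ * \par Example
+ * Requesting a single buffer without sending any (a minimal sketch;
+ * \c ctx comes from drmCreateContext()):
+ * \code
+ * int idx, sz;
+ * drmDMAReq req;
+ *
+ * memset(&req, 0, sizeof(req));
+ * req.context       = ctx;
+ * req.flags         = DRM_DMA_WAIT;
+ * req.request_count = 1;
+ * req.request_size  = 65536;
+ * req.request_list  = &idx;
+ * req.request_sizes = &sz;
+ * if (drmDMA(fd, &req) == 0 && req.granted_count == 1)
+ *     ; // bufs->list[idx] from drmMapBufs() is now ours to fill
+ * \endcode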
+ *
+ * \internal
+ * Assembles the arguments into a drm_dma structure and keeps issuing the
+ * DRM_IOCTL_DMA ioctl until success or until maximum number of retries.
+ */
+int drmDMA(int fd, drmDMAReqPtr request)
+{
+ drm_dma_t dma;
+ int ret, i = 0;
+
+ dma.context = request->context;
+ dma.send_count = request->send_count;
+ dma.send_indices = request->send_list;
+ dma.send_sizes = request->send_sizes;
+ dma.flags = request->flags;
+ dma.request_count = request->request_count;
+ dma.request_size = request->request_size;
+ dma.request_indices = request->request_list;
+ dma.request_sizes = request->request_sizes;
+ dma.granted_count = 0;
+
+ do {
+ ret = ioctl(fd, DRM_IOCTL_DMA, &dma);
+ } while (ret && errno == EAGAIN && i++ < DRM_DMA_RETRY);
+
+ if (ret == 0) {
+ request->granted_count = dma.granted_count;
+ return 0;
+ } else {
+ return -errno;
+ }
+}
+
+
+/**
+ * Obtain heavyweight hardware lock.
+ *
+ * \param fd file descriptor.
+ * \param context context.
+ * \param flags flags that determine the state of the hardware when the
+ * function returns.
+ *
+ * \return always zero.
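+ *
+ * \par Example
+ * Typical bracketing of direct hardware access (a sketch):
+ * \code
+ * drmGetLock(fd, ctx, DRM_LOCK_READY);
+ * // ... touch the hardware ...
+ * drmUnlock(fd, ctx);
+ * \endcode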
+ *
+ * \internal
+ * This function translates the arguments into a drm_lock structure and issues
+ * the DRM_IOCTL_LOCK ioctl until the lock is successfully acquired.
+ */
+int drmGetLock(int fd, drm_context_t context, drmLockFlags flags)
+{
+ drm_lock_t lock;
+
+ memclear(lock);
+ lock.context = context;
+ lock.flags = 0;
+ if (flags & DRM_LOCK_READY) lock.flags |= _DRM_LOCK_READY;
+ if (flags & DRM_LOCK_QUIESCENT) lock.flags |= _DRM_LOCK_QUIESCENT;
+ if (flags & DRM_LOCK_FLUSH) lock.flags |= _DRM_LOCK_FLUSH;
+ if (flags & DRM_LOCK_FLUSH_ALL) lock.flags |= _DRM_LOCK_FLUSH_ALL;
+ if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
+ if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
+
+ while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock))
+ ;
+ return 0;
+}
+
+/**
+ * Release the hardware lock.
+ *
+ * \param fd file descriptor.
+ * \param context context.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_UNLOCK ioctl, passing the
+ * argument in a drm_lock structure.
+ */
+int drmUnlock(int fd, drm_context_t context)
+{
+ drm_lock_t lock;
+
+ memclear(lock);
+ lock.context = context;
+ return drmIoctl(fd, DRM_IOCTL_UNLOCK, &lock);
+}
+
+drm_context_t *drmGetReservedContextList(int fd, int *count)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t *list;
+ drm_context_t * retval;
+ int i;
+
+ memclear(res);
+ if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
+ return NULL;
+
+ if (!res.count)
+ return NULL;
+
+ if (!(list = drmMalloc(res.count * sizeof(*list))))
+ return NULL;
+ if (!(retval = drmMalloc(res.count * sizeof(*retval)))) {
+ drmFree(list);
+ return NULL;
+ }
+
+ res.contexts = list;
+ if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res)) {
+ drmFree(list);
+ drmFree(retval);
+ return NULL;
+ }
+
+ for (i = 0; i < res.count; i++)
+ retval[i] = list[i].handle;
+ drmFree(list);
+
+ *count = res.count;
+ return retval;
+}
+
+void drmFreeReservedContextList(drm_context_t *pt)
+{
+ drmFree(pt);
+}
+
+/**
+ * Create context.
+ *
+ * Used by the X server during GLXContext initialization. This causes
+ * per-context kernel-level resources to be allocated.
+ *
+ * \param fd file descriptor.
+ * \param handle is set on success. To be used by the client when requesting DMA
+ * dispatch with drmDMA().
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \note May only be called by root.
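+ *
+ * \par Example
+ * A minimal create/use/destroy sketch:
+ * \code
+ * drm_context_t ctx;
+ *
+ * if (drmCreateContext(fd, &ctx) == 0) {
+ *     // ... use ctx with drmDMA(), drmGetLock(), etc. ...
+ *     drmDestroyContext(fd, ctx);
+ * }
+ * \endcode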
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_ADD_CTX ioctl, passing the
+ * argument in a drm_ctx structure.
+ */
+int drmCreateContext(int fd, drm_context_t *handle)
+{
+ drm_ctx_t ctx;
+
+ memclear(ctx);
+ if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
+ return -errno;
+ *handle = ctx.handle;
+ return 0;
+}
+
+int drmSwitchToContext(int fd, drm_context_t context)
+{
+ drm_ctx_t ctx;
+
+ memclear(ctx);
+ ctx.handle = context;
+ if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
+ return -errno;
+ return 0;
+}
+
+int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags)
+{
+ drm_ctx_t ctx;
+
+ /*
+ * Context preserving means that no context switches are done between DMA
+ * buffers from one context and the next. This is suitable for use in the
+ * X server (which promises to maintain hardware context), or in the
+ * client-side library when buffers are swapped on behalf of two threads.
+ */
+ memclear(ctx);
+ ctx.handle = context;
+ if (flags & DRM_CONTEXT_PRESERVED)
+ ctx.flags |= _DRM_CONTEXT_PRESERVED;
+ if (flags & DRM_CONTEXT_2DONLY)
+ ctx.flags |= _DRM_CONTEXT_2DONLY;
+ if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
+ return -errno;
+ return 0;
+}
+
+int drmGetContextFlags(int fd, drm_context_t context,
+ drm_context_tFlagsPtr flags)
+{
+ drm_ctx_t ctx;
+
+ memclear(ctx);
+ ctx.handle = context;
+ if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx))
+ return -errno;
+ *flags = 0;
+ if (ctx.flags & _DRM_CONTEXT_PRESERVED)
+ *flags |= DRM_CONTEXT_PRESERVED;
+ if (ctx.flags & _DRM_CONTEXT_2DONLY)
+ *flags |= DRM_CONTEXT_2DONLY;
+ return 0;
+}
+
+/**
+ * Destroy context.
+ *
+ * Free any kernel-level resources allocated with drmCreateContext() associated
+ * with the context.
+ *
+ * \param fd file descriptor.
+ * \param handle handle given by drmCreateContext().
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \note May only be called by root.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_RM_CTX ioctl, passing the
+ * argument in a drm_ctx structure.
+ */
+int drmDestroyContext(int fd, drm_context_t handle)
+{
+ drm_ctx_t ctx;
+
+ memclear(ctx);
+ ctx.handle = handle;
+ if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx))
+ return -errno;
+ return 0;
+}
+
+int drmCreateDrawable(int fd, drm_drawable_t *handle)
+{
+ drm_draw_t draw;
+
+ memclear(draw);
+ if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
+ return -errno;
+ *handle = draw.handle;
+ return 0;
+}
+
+int drmDestroyDrawable(int fd, drm_drawable_t handle)
+{
+ drm_draw_t draw;
+
+ memclear(draw);
+ draw.handle = handle;
+ if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw))
+ return -errno;
+ return 0;
+}
+
+int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
+ drm_drawable_info_type_t type, unsigned int num,
+ void *data)
+{
+ drm_update_draw_t update;
+
+ memclear(update);
+ update.handle = handle;
+ update.type = type;
+ update.num = num;
+ update.data = (unsigned long long)(unsigned long)data;
+
+ if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
+ return -errno;
+
+ return 0;
+}
+
+/**
+ * Acquire the AGP device.
+ *
+ * Must be called before any of the other AGP related calls.
+ *
+ * \param fd file descriptor.
+ *
+ * \return zero on success, or a negative value on failure.
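+ *
+ * \par Example
+ * Typical legacy AGP bring-up sequence (a sketch; the mode and sizes are
+ * device-specific and error handling is elided):
+ * \code
+ * unsigned long phys;
+ * drm_handle_t agp;
+ *
+ * drmAgpAcquire(fd);
+ * drmAgpEnable(fd, drmAgpGetMode(fd));
+ * drmAgpAlloc(fd, 4 * 1024 * 1024, 0, &phys, &agp);
+ * drmAgpBind(fd, agp, 0);
+ * \endcode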
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_ACQUIRE ioctl.
+ */
+int drmAgpAcquire(int fd)
+{
+ if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
+ return -errno;
+ return 0;
+}
+
+
+/**
+ * Release the AGP device.
+ *
+ * \param fd file descriptor.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_RELEASE ioctl.
+ */
+int drmAgpRelease(int fd)
+{
+ if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
+ return -errno;
+ return 0;
+}
+
+
+/**
+ * Set the AGP mode.
+ *
+ * \param fd file descriptor.
+ * \param mode AGP mode.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_ENABLE ioctl, passing the
+ * argument in a drm_agp_mode structure.
+ */
+int drmAgpEnable(int fd, unsigned long mode)
+{
+ drm_agp_mode_t m;
+
+ memclear(m);
+ m.mode = mode;
+ if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
+ return -errno;
+ return 0;
+}
+
+
+/**
+ * Allocate a chunk of AGP memory.
+ *
+ * \param fd file descriptor.
+ * \param size requested memory size in bytes. Will be rounded to a page boundary.
+ * \param type type of memory to allocate.
+ * \param address if not zero, will be set to the physical address of the
+ * allocated memory.
+ * \param handle on success will be set to a handle of the allocated memory.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_ALLOC ioctl, passing the
+ * arguments in a drm_agp_buffer structure.
+ */
+int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
+ unsigned long *address, drm_handle_t *handle)
+{
+ drm_agp_buffer_t b;
+
+ memclear(b);
+ *handle = DRM_AGP_NO_HANDLE;
+ b.size = size;
+ b.type = type;
+ if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
+ return -errno;
+ if (address != NULL)
+ *address = b.physical;
+ *handle = b.handle;
+ return 0;
+}
+
+
+/**
+ * Free a chunk of AGP memory.
+ *
+ * \param fd file descriptor.
+ * \param handle handle to the allocated memory, as given by drmAgpAllocate().
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_FREE ioctl, passing the
+ * argument in a drm_agp_buffer structure.
+ */
+int drmAgpFree(int fd, drm_handle_t handle)
+{
+ drm_agp_buffer_t b;
+
+ memclear(b);
+ b.handle = handle;
+ if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b))
+ return -errno;
+ return 0;
+}
+
+
+/**
+ * Bind a chunk of AGP memory.
+ *
+ * \param fd file descriptor.
+ * \param handle handle to the allocated memory, as given by drmAgpAllocate().
+ * \param offset offset in bytes. It will be rounded to a page boundary.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_BIND ioctl, passing the
+ * argument in a drm_agp_binding structure.
+ */
+int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)
+{
+ drm_agp_binding_t b;
+
+ memclear(b);
+ b.handle = handle;
+ b.offset = offset;
+ if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b))
+ return -errno;
+ return 0;
+}
+
+
+/**
+ * Unbind a chunk of AGP memory.
+ *
+ * \param fd file descriptor.
+ * \param handle handle to the allocated memory, as given by drmAgpAllocate().
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_UNBIND ioctl, passing
+ * the argument in a drm_agp_binding structure.
+ */
+int drmAgpUnbind(int fd, drm_handle_t handle)
+{
+ drm_agp_binding_t b;
+
+ memclear(b);
+ b.handle = handle;
+ if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
+ return -errno;
+ return 0;
+}
+
+
+/**
+ * Get AGP driver major version number.
+ *
+ * \param fd file descriptor.
+ *
+ * \return major version number on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
+ * necessary information in a drm_agp_info structure.
+ */
+int drmAgpVersionMajor(int fd)
+{
+ drm_agp_info_t i;
+
+ memclear(i);
+
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ return -errno;
+ return i.agp_version_major;
+}
+
+
+/**
+ * Get AGP driver minor version number.
+ *
+ * \param fd file descriptor.
+ *
+ * \return minor version number on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
+ * necessary information in a drm_agp_info structure.
+ */
+int drmAgpVersionMinor(int fd)
+{
+ drm_agp_info_t i;
+
+ memclear(i);
+
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ return -errno;
+ return i.agp_version_minor;
+}
+
+
+/**
+ * Get AGP mode.
+ *
+ * \param fd file descriptor.
+ *
+ * \return mode on success, or zero on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
+ * necessary information in a drm_agp_info structure.
+ */
+unsigned long drmAgpGetMode(int fd)
+{
+ drm_agp_info_t i;
+
+ memclear(i);
+
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ return 0;
+ return i.mode;
+}
+
+
+/**
+ * Get AGP aperture base.
+ *
+ * \param fd file descriptor.
+ *
+ * \return aperture base on success, zero on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
+ * necessary information in a drm_agp_info structure.
+ */
+unsigned long drmAgpBase(int fd)
+{
+ drm_agp_info_t i;
+
+ memclear(i);
+
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ return 0;
+ return i.aperture_base;
+}
+
+
+/**
+ * Get AGP aperture size.
+ *
+ * \param fd file descriptor.
+ *
+ * \return aperture size on success, zero on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
+ * necessary information in a drm_agp_info structure.
+ */
+unsigned long drmAgpSize(int fd)
+{
+ drm_agp_info_t i;
+
+ memclear(i);
+
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ return 0;
+ return i.aperture_size;
+}
+
+
+/**
+ * Get used AGP memory.
+ *
+ * \param fd file descriptor.
+ *
+ * \return memory used on success, or zero on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
+ * necessary information in a drm_agp_info structure.
+ */
+unsigned long drmAgpMemoryUsed(int fd)
+{
+ drm_agp_info_t i;
+
+ memclear(i);
+
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ return 0;
+ return i.memory_used;
+}
+
+
+/**
+ * Get available AGP memory.
+ *
+ * \param fd file descriptor.
+ *
+ * \return memory available on success, or zero on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
+ * necessary information in a drm_agp_info structure.
+ */
+unsigned long drmAgpMemoryAvail(int fd)
+{
+ drm_agp_info_t i;
+
+ memclear(i);
+
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ return 0;
+ return i.memory_allowed;
+}
+
+
+/**
+ * Get hardware vendor ID.
+ *
+ * \param fd file descriptor.
+ *
+ * \return vendor ID on success, or zero on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
+ * necessary information in a drm_agp_info structure.
+ */
+unsigned int drmAgpVendorId(int fd)
+{
+ drm_agp_info_t i;
+
+ memclear(i);
+
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ return 0;
+ return i.id_vendor;
+}
+
+
+/**
+ * Get hardware device ID.
+ *
+ * \param fd file descriptor.
+ *
+ * \return device ID on success, or zero on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
+ * necessary information in a drm_agp_info structure.
+ */
+unsigned int drmAgpDeviceId(int fd)
+{
+ drm_agp_info_t i;
+
+ memclear(i);
+
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ return 0;
+ return i.id_device;
+}
+
+int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
+{
+ drm_scatter_gather_t sg;
+
+ memclear(sg);
+
+ *handle = 0;
+ sg.size = size;
+ if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
+ return -errno;
+ *handle = sg.handle;
+ return 0;
+}
+
+int drmScatterGatherFree(int fd, drm_handle_t handle)
+{
+ drm_scatter_gather_t sg;
+
+ memclear(sg);
+ sg.handle = handle;
+ if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg))
+ return -errno;
+ return 0;
+}
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param fd file descriptor.
+ * \param vbl pointer to a drmVBlank structure.
+ *
+ * \return zero on success, or a negative value on failure.
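+ *
+ * \par Example
+ * Waiting for the next vblank on the first CRTC (a minimal sketch):
+ * \code
+ * drmVBlank vbl;
+ *
+ * memset(&vbl, 0, sizeof(vbl));
+ * vbl.request.type     = DRM_VBLANK_RELATIVE;
+ * vbl.request.sequence = 1;
+ * if (drmWaitVBlank(fd, &vbl) == 0)
+ *     ; // vbl.reply.sequence holds the vblank count, vbl.reply.tval_* the time
+ * \endcode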
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_WAIT_VBLANK ioctl.
+ */
+int drmWaitVBlank(int fd, drmVBlankPtr vbl)
+{
+ struct timespec timeout, cur;
+ int ret;
+
+ ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
+ if (ret < 0) {
+ fprintf(stderr, "clock_gettime failed: %s\n", strerror(errno));
+ goto out;
+ }
+ timeout.tv_sec++;
+
+ do {
+ ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
+ vbl->request.type &= ~DRM_VBLANK_RELATIVE;
+ if (ret && errno == EINTR) {
+ clock_gettime(CLOCK_MONOTONIC, &cur);
+ /* Timeout after 1s */
+ if (cur.tv_sec > timeout.tv_sec + 1 ||
+ (cur.tv_sec == timeout.tv_sec && cur.tv_nsec >=
+ timeout.tv_nsec)) {
+ errno = EBUSY;
+ ret = -1;
+ break;
+ }
+ }
+ } while (ret && errno == EINTR);
+
+out:
+ return ret;
+}
+
+int drmError(int err, const char *label)
+{
+ switch (err) {
+ case DRM_ERR_NO_DEVICE:
+ fprintf(stderr, "%s: no device\n", label);
+ break;
+ case DRM_ERR_NO_ACCESS:
+ fprintf(stderr, "%s: no access\n", label);
+ break;
+ case DRM_ERR_NOT_ROOT:
+ fprintf(stderr, "%s: not root\n", label);
+ break;
+ case DRM_ERR_INVALID:
+ fprintf(stderr, "%s: invalid args\n", label);
+ break;
+ default:
+ if (err < 0)
+ err = -err;
+ fprintf(stderr, "%s: error %d (%s)\n", label, err, strerror(err));
+ break;
+ }
+
+ return 1;
+}
+
+/**
+ * Install IRQ handler.
+ *
+ * \param fd file descriptor.
+ * \param irq IRQ number.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_CONTROL ioctl, passing the
+ * argument in a drm_control structure.
+ */
+int drmCtlInstHandler(int fd, int irq)
+{
+ drm_control_t ctl;
+
+ memclear(ctl);
+ ctl.func = DRM_INST_HANDLER;
+ ctl.irq = irq;
+ if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
+ return -errno;
+ return 0;
+}
+
+
+/**
+ * Uninstall IRQ handler.
+ *
+ * \param fd file descriptor.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_CONTROL ioctl, passing the
+ * argument in a drm_control structure.
+ */
+int drmCtlUninstHandler(int fd)
+{
+ drm_control_t ctl;
+
+ memclear(ctl);
+ ctl.func = DRM_UNINST_HANDLER;
+ ctl.irq = 0;
+ if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
+ return -errno;
+ return 0;
+}
+
+int drmFinish(int fd, int context, drmLockFlags flags)
+{
+ drm_lock_t lock;
+
+ memclear(lock);
+ lock.context = context;
+ if (flags & DRM_LOCK_READY) lock.flags |= _DRM_LOCK_READY;
+ if (flags & DRM_LOCK_QUIESCENT) lock.flags |= _DRM_LOCK_QUIESCENT;
+ if (flags & DRM_LOCK_FLUSH) lock.flags |= _DRM_LOCK_FLUSH;
+ if (flags & DRM_LOCK_FLUSH_ALL) lock.flags |= _DRM_LOCK_FLUSH_ALL;
+ if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
+ if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
+ if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock))
+ return -errno;
+ return 0;
+}
+
+/**
+ * Get IRQ from bus ID.
+ *
+ * \param fd file descriptor.
+ * \param busnum bus number.
+ * \param devnum device number.
+ * \param funcnum function number.
+ *
+ * \return IRQ number on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_IRQ_BUSID ioctl, passing the
+ * arguments in a drm_irq_busid structure.
+ */
+int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
+{
+ drm_irq_busid_t p;
+
+ memclear(p);
+ p.busnum = busnum;
+ p.devnum = devnum;
+ p.funcnum = funcnum;
+ if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
+ return -errno;
+ return p.irq;
+}
+
+int drmAddContextTag(int fd, drm_context_t context, void *tag)
+{
+ drmHashEntry *entry = drmGetEntry(fd);
+
+ if (drmHashInsert(entry->tagTable, context, tag)) {
+ drmHashDelete(entry->tagTable, context);
+ drmHashInsert(entry->tagTable, context, tag);
+ }
+ return 0;
+}
+
+int drmDelContextTag(int fd, drm_context_t context)
+{
+ drmHashEntry *entry = drmGetEntry(fd);
+
+ return drmHashDelete(entry->tagTable, context);
+}
+
+void *drmGetContextTag(int fd, drm_context_t context)
+{
+ drmHashEntry *entry = drmGetEntry(fd);
+ void *value;
+
+ if (drmHashLookup(entry->tagTable, context, &value))
+ return NULL;
+
+ return value;
+}
+
+int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
+ drm_handle_t handle)
+{
+ drm_ctx_priv_map_t map;
+
+ memclear(map);
+ map.ctx_id = ctx_id;
+ map.handle = (void *)(uintptr_t)handle;
+
+ if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
+ return -errno;
+ return 0;
+}
+
+int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
+ drm_handle_t *handle)
+{
+ drm_ctx_priv_map_t map;
+
+ memclear(map);
+ map.ctx_id = ctx_id;
+
+ if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
+ return -errno;
+ if (handle)
+ *handle = (drm_handle_t)(uintptr_t)map.handle;
+
+ return 0;
+}
+
+int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
+ drmMapType *type, drmMapFlags *flags, drm_handle_t *handle,
+ int *mtrr)
+{
+ drm_map_t map;
+
+ memclear(map);
+ map.offset = idx;
+ if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map))
+ return -errno;
+ *offset = map.offset;
+ *size = map.size;
+ *type = map.type;
+ *flags = map.flags;
+ *handle = (unsigned long)map.handle;
+ *mtrr = map.mtrr;
+ return 0;
+}
+
+int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,
+ unsigned long *magic, unsigned long *iocs)
+{
+ drm_client_t client;
+
+ memclear(client);
+ client.idx = idx;
+ if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client))
+ return -errno;
+ *auth = client.auth;
+ *pid = client.pid;
+ *uid = client.uid;
+ *magic = client.magic;
+ *iocs = client.iocs;
+ return 0;
+}
+
+int drmGetStats(int fd, drmStatsT *stats)
+{
+ drm_stats_t s;
+ unsigned i;
+
+ memclear(s);
+ if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s))
+ return -errno;
+
+ memset(stats, 0, sizeof(*stats));
+ if (s.count > sizeof(stats->data)/sizeof(stats->data[0]))
+ return -1;
+
+#define SET_VALUE \
+ stats->data[i].long_format = "%-20.20s"; \
+ stats->data[i].rate_format = "%8.8s"; \
+ stats->data[i].isvalue = 1; \
+ stats->data[i].verbose = 0
+
+#define SET_COUNT \
+ stats->data[i].long_format = "%-20.20s"; \
+ stats->data[i].rate_format = "%5.5s"; \
+ stats->data[i].isvalue = 0; \
+ stats->data[i].mult_names = "kgm"; \
+ stats->data[i].mult = 1000; \
+ stats->data[i].verbose = 0
+
+#define SET_BYTE \
+ stats->data[i].long_format = "%-20.20s"; \
+ stats->data[i].rate_format = "%5.5s"; \
+ stats->data[i].isvalue = 0; \
+ stats->data[i].mult_names = "KGM"; \
+ stats->data[i].mult = 1024; \
+ stats->data[i].verbose = 0
+
+
+ stats->count = s.count;
+ for (i = 0; i < s.count; i++) {
+ stats->data[i].value = s.data[i].value;
+ switch (s.data[i].type) {
+ case _DRM_STAT_LOCK:
+ stats->data[i].long_name = "Lock";
+ stats->data[i].rate_name = "Lock";
+ SET_VALUE;
+ break;
+ case _DRM_STAT_OPENS:
+ stats->data[i].long_name = "Opens";
+ stats->data[i].rate_name = "O";
+ SET_COUNT;
+ stats->data[i].verbose = 1;
+ break;
+ case _DRM_STAT_CLOSES:
+ stats->data[i].long_name = "Closes";
+ stats->data[i].rate_name = "Lock";
+ SET_COUNT;
+ stats->data[i].verbose = 1;
+ break;
+ case _DRM_STAT_IOCTLS:
+ stats->data[i].long_name = "Ioctls";
+ stats->data[i].rate_name = "Ioc/s";
+ SET_COUNT;
+ break;
+ case _DRM_STAT_LOCKS:
+ stats->data[i].long_name = "Locks";
+ stats->data[i].rate_name = "Lck/s";
+ SET_COUNT;
+ break;
+ case _DRM_STAT_UNLOCKS:
+ stats->data[i].long_name = "Unlocks";
+ stats->data[i].rate_name = "Unl/s";
+ SET_COUNT;
+ break;
+ case _DRM_STAT_IRQ:
+ stats->data[i].long_name = "IRQs";
+ stats->data[i].rate_name = "IRQ/s";
+ SET_COUNT;
+ break;
+ case _DRM_STAT_PRIMARY:
+ stats->data[i].long_name = "Primary Bytes";
+ stats->data[i].rate_name = "PB/s";
+ SET_BYTE;
+ break;
+ case _DRM_STAT_SECONDARY:
+ stats->data[i].long_name = "Secondary Bytes";
+ stats->data[i].rate_name = "SB/s";
+ SET_BYTE;
+ break;
+ case _DRM_STAT_DMA:
+ stats->data[i].long_name = "DMA";
+ stats->data[i].rate_name = "DMA/s";
+ SET_COUNT;
+ break;
+ case _DRM_STAT_SPECIAL:
+ stats->data[i].long_name = "Special DMA";
+ stats->data[i].rate_name = "dma/s";
+ SET_COUNT;
+ break;
+ case _DRM_STAT_MISSED:
+ stats->data[i].long_name = "Miss";
+ stats->data[i].rate_name = "Ms/s";
+ SET_COUNT;
+ break;
+ case _DRM_STAT_VALUE:
+ stats->data[i].long_name = "Value";
+ stats->data[i].rate_name = "Value";
+ SET_VALUE;
+ break;
+ case _DRM_STAT_BYTE:
+ stats->data[i].long_name = "Bytes";
+ stats->data[i].rate_name = "B/s";
+ SET_BYTE;
+ break;
+ case _DRM_STAT_COUNT:
+ default:
+ stats->data[i].long_name = "Count";
+ stats->data[i].rate_name = "Cnt/s";
+ SET_COUNT;
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Issue a set-version ioctl.
+ *
+ * \param fd file descriptor.
+ * \param version pointer to a drmSetVersion structure; on return it holds
+ * the interface version actually set by the kernel.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * This function is a wrapper around the DRM_IOCTL_SET_VERSION ioctl, passing
+ * the arguments in a drm_set_version structure.
+ */
+int drmSetInterfaceVersion(int fd, drmSetVersion *version)
+{
+ int retcode = 0;
+ drm_set_version_t sv;
+
+ memclear(sv);
+ sv.drm_di_major = version->drm_di_major;
+ sv.drm_di_minor = version->drm_di_minor;
+ sv.drm_dd_major = version->drm_dd_major;
+ sv.drm_dd_minor = version->drm_dd_minor;
+
+ if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
+ retcode = -errno;
+ }
+
+ version->drm_di_major = sv.drm_di_major;
+ version->drm_di_minor = sv.drm_di_minor;
+ version->drm_dd_major = sv.drm_dd_major;
+ version->drm_dd_minor = sv.drm_dd_minor;
+
+ return retcode;
+}
+
+/**
+ * Send a device-specific command.
+ *
+ * \param fd file descriptor.
+ * \param drmCommandIndex command index
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * It issues an ioctl given by
+ * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
+ */
+int drmCommandNone(int fd, unsigned long drmCommandIndex)
+{
+ unsigned long request;
+
+ request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex);
+
+ if (drmIoctl(fd, request, NULL)) {
+ return -errno;
+ }
+ return 0;
+}
+
+
+/**
+ * Send a device-specific read command.
+ *
+ * \param fd file descriptor.
+ * \param drmCommandIndex command index
+ * \param data destination pointer of the data to be read.
+ * \param size size of the data to be read.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * It issues a read ioctl given by
+ * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
+ */
+int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
+ unsigned long size)
+{
+ unsigned long request;
+
+ request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE,
+ DRM_COMMAND_BASE + drmCommandIndex, size);
+
+ if (drmIoctl(fd, request, data)) {
+ return -errno;
+ }
+ return 0;
+}
+
+
+/**
+ * Send a device-specific write command.
+ *
+ * \param fd file descriptor.
+ * \param drmCommandIndex command index
+ * \param data source pointer of the data to be written.
+ * \param size size of the data to be written.
+ *
+ * \return zero on success, or a negative value on failure.
+ *
+ * \internal
+ * It issues a write ioctl given by
+ * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
+ */
+int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
+ unsigned long size)
+{
+ unsigned long request;
+
+ request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE,
+ DRM_COMMAND_BASE + drmCommandIndex, size);
+
+ if (drmIoctl(fd, request, data)) {
+ return -errno;
+ }
+ return 0;
+}
+
+
+/**
+ * Send a device-specific read-write command.
+ *
+ * \param fd file descriptor.
+ * \param drmCommandIndex command index
+ * \param data source pointer of the data to be read and written.
+ * \param size size of the data to be read and written.
+ *
+ * \return zero on success, or a negative value on failure.
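+ *
+ * \par Example
+ * Illustrative only -- the command index and the payload layout are defined
+ * by the individual driver's UAPI header (the names below are hypothetical):
+ * \code
+ * struct my_drv_getparam args = { .param = MY_DRV_PARAM_CHIP_ID };
+ * int ret = drmCommandWriteRead(fd, MY_DRV_GETPARAM, &args, sizeof(args));
+ * \endcode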
+ *
+ * \internal
+ * It issues a read-write ioctl given by
+ * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
+ */
+int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
+ unsigned long size)
+{
+ unsigned long request;
+
+ request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE,
+ DRM_COMMAND_BASE + drmCommandIndex, size);
+
+ if (drmIoctl(fd, request, data))
+ return -errno;
+ return 0;
+}
+
+#define DRM_MAX_FDS 16
+static struct {
+ char *BusID;
+ int fd;
+ int refcount;
+ int type;
+} connection[DRM_MAX_FDS];
+
+static int nr_fds = 0;
+
+int drmOpenOnce(void *unused,
+ const char *BusID,
+ int *newlyopened)
+{
+ return drmOpenOnceWithType(BusID, newlyopened, DRM_NODE_PRIMARY);
+}
+
+int drmOpenOnceWithType(const char *BusID, int *newlyopened, int type)
+{
+ int i;
+ int fd;
+
+ for (i = 0; i < nr_fds; i++)
+ if ((strcmp(BusID, connection[i].BusID) == 0) &&
+ (connection[i].type == type)) {
+ connection[i].refcount++;
+ *newlyopened = 0;
+ return connection[i].fd;
+ }
+
+ fd = drmOpenWithType(NULL, BusID, type);
+ if (fd < 0 || nr_fds == DRM_MAX_FDS)
+ return fd;
+
+ connection[nr_fds].BusID = strdup(BusID);
+ connection[nr_fds].fd = fd;
+ connection[nr_fds].refcount = 1;
+ connection[nr_fds].type = type;
+ *newlyopened = 1;
+
+ if (0)
+ fprintf(stderr, "saved connection %d for %s %d\n",
+ nr_fds, connection[nr_fds].BusID,
+ strcmp(BusID, connection[nr_fds].BusID));
+
+ nr_fds++;
+
+ return fd;
+}
+
+void drmCloseOnce(int fd)
+{
+ int i;
+
+ for (i = 0; i < nr_fds; i++) {
+ if (fd == connection[i].fd) {
+ if (--connection[i].refcount == 0) {
+ drmClose(connection[i].fd);
+ free(connection[i].BusID);
+
+ if (i < --nr_fds)
+ connection[i] = connection[nr_fds];
+
+ return;
+ }
+ }
+ }
+}
+
+int drmSetMaster(int fd)
+{
+ return drmIoctl(fd, DRM_IOCTL_SET_MASTER, NULL);
+}
+
+int drmDropMaster(int fd)
+{
+ return drmIoctl(fd, DRM_IOCTL_DROP_MASTER, NULL);
+}
+
+char *drmGetDeviceNameFromFd(int fd)
+{
+ char name[128];
+ struct stat sbuf;
+ dev_t d;
+ int i;
+
+ /* The whole drmOpen thing is a fiasco and we need to find a way
+ * back to just using open(2). For now, however, let's just make
+ * things worse with even more ad hoc directory walking code to
+ * discover the device file name. */
+
+ fstat(fd, &sbuf);
+ d = sbuf.st_rdev;
+
+ for (i = 0; i < DRM_MAX_MINOR; i++) {
+ snprintf(name, sizeof name, DRM_DEV_NAME, DRM_DIR_NAME, i);
+ if (stat(name, &sbuf) == 0 && sbuf.st_rdev == d)
+ break;
+ }
+ if (i == DRM_MAX_MINOR)
+ return NULL;
+
+ return strdup(name);
+}
+
+int drmGetNodeTypeFromFd(int fd)
+{
+ struct stat sbuf;
+ int maj, min, type;
+
+ if (fstat(fd, &sbuf))
+ return -1;
+
+ maj = major(sbuf.st_rdev);
+ min = minor(sbuf.st_rdev);
+
+ if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ type = drmGetMinorType(min);
+ if (type == -1)
+ errno = ENODEV;
+ return type;
+}
+
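+/**
+ * Export a GEM handle as a PRIME (dma-buf) file descriptor.
+ *
+ * A minimal sharing sketch (illustrative; handles are local to each device
+ * file descriptor, so the export/import pair is what moves a buffer between
+ * them; DRM_CLOEXEC comes from drm.h):
+ * \code
+ * int prime_fd;
+ * uint32_t imported;
+ *
+ * drmPrimeHandleToFD(fd_a, handle, DRM_CLOEXEC, &prime_fd);
+ * drmPrimeFDToHandle(fd_b, prime_fd, &imported);
+ * close(prime_fd);
+ * \endcode
+ *
+ * \return zero on success, or a negative value on failure.
+ */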
+int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd)
+{
+ struct drm_prime_handle args;
+ int ret;
+
+ memclear(args);
+ args.fd = -1;
+ args.handle = handle;
+ args.flags = flags;
+ ret = drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
+ if (ret)
+ return ret;
+
+ *prime_fd = args.fd;
+ return 0;
+}
+
+int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle)
+{
+ struct drm_prime_handle args;
+ int ret;
+
+ memclear(args);
+ args.fd = prime_fd;
+ ret = drmIoctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
+ if (ret)
+ return ret;
+
+ *handle = args.handle;
+ return 0;
+}
+
+static char *drmGetMinorNameForFD(int fd, int type)
+{
+#ifdef __linux__
+ DIR *sysdir;
+ struct dirent *pent, *ent;
+ struct stat sbuf;
+ const char *name = drmGetMinorName(type);
+ int len;
+ char dev_name[64], buf[64];
+ long name_max;
+ int maj, min;
+
+ if (!name)
+ return NULL;
+
+ len = strlen(name);
+
+ if (fstat(fd, &sbuf))
+ return NULL;
+
+ maj = major(sbuf.st_rdev);
+ min = minor(sbuf.st_rdev);
+
+ if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode))
+ return NULL;
+
+ snprintf(buf, sizeof(buf), "/sys/dev/char/%d:%d/device/drm", maj, min);
+
+ sysdir = opendir(buf);
+ if (!sysdir)
+ return NULL;
+
+ name_max = fpathconf(dirfd(sysdir), _PC_NAME_MAX);
+ if (name_max == -1)
+ goto out_close_dir;
+
+ pent = malloc(offsetof(struct dirent, d_name) + name_max + 1);
+ if (pent == NULL)
+ goto out_close_dir;
+
+ while (readdir_r(sysdir, pent, &ent) == 0 && ent != NULL) {
+ if (strncmp(ent->d_name, name, len) == 0) {
+ snprintf(dev_name, sizeof(dev_name), DRM_DIR_NAME "/%s",
+ ent->d_name);
+
+ free(pent);
+ closedir(sysdir);
+
+ return strdup(dev_name);
+ }
+ }
+
+ free(pent);
+
+out_close_dir:
+ closedir(sysdir);
+#else
+#warning "Missing implementation of drmGetMinorNameForFD"
+#endif
+ return NULL;
+}
+
+char *drmGetPrimaryDeviceNameFromFd(int fd)
+{
+ return drmGetMinorNameForFD(fd, DRM_NODE_PRIMARY);
+}
+
+char *drmGetRenderDeviceNameFromFd(int fd)
+{
+ return drmGetMinorNameForFD(fd, DRM_NODE_RENDER);
+}
+
+static int drmParseSubsystemType(int maj, int min)
+{
+#ifdef __linux__
+ char path[PATH_MAX + 1];
+ char link[PATH_MAX + 1] = "";
+ char *name;
+
+ snprintf(path, PATH_MAX, "/sys/dev/char/%d:%d/device/subsystem",
+ maj, min);
+
+ if (readlink(path, link, PATH_MAX) < 0)
+ return -errno;
+
+ name = strrchr(link, '/');
+ if (!name)
+ return -EINVAL;
+
+ if (strncmp(name, "/pci", 4) == 0)
+ return DRM_BUS_PCI;
+
+ return -EINVAL;
+#else
+#warning "Missing implementation of drmParseSubsystemType"
+ return -EINVAL;
+#endif
+}
+
+static int drmParsePciBusInfo(int maj, int min, drmPciBusInfoPtr info)
+{
+#ifdef __linux__
+ char path[PATH_MAX + 1];
+ char data[128 + 1];
+ char *str;
+ int domain, bus, dev, func;
+ int fd, ret;
+
+ snprintf(path, PATH_MAX, "/sys/dev/char/%d:%d/device/uevent", maj, min);
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+
+ ret = read(fd, data, sizeof(data) - 1);
+ close(fd);
+ if (ret < 0)
+ return -errno;
+ data[ret] = '\0';
+
+#define TAG "PCI_SLOT_NAME="
+ str = strstr(data, TAG);
+ if (str == NULL)
+ return -EINVAL;
+
+ if (sscanf(str, TAG "%04x:%02x:%02x.%1u",
+ &domain, &bus, &dev, &func) != 4)
+ return -EINVAL;
+#undef TAG
+
+ info->domain = domain;
+ info->bus = bus;
+ info->dev = dev;
+ info->func = func;
+
+ return 0;
+#else
+#warning "Missing implementation of drmParsePciBusInfo"
+ return -EINVAL;
+#endif
+}
+
+static int drmCompareBusInfo(drmDevicePtr a, drmDevicePtr b)
+{
+ if (a == NULL || b == NULL)
+ return -1;
+
+ if (a->bustype != b->bustype)
+ return -1;
+
+ switch (a->bustype) {
+ case DRM_BUS_PCI:
+ return memcmp(a->businfo.pci, b->businfo.pci, sizeof(drmPciBusInfo));
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int drmGetNodeType(const char *name)
+{
+ if (strncmp(name, DRM_PRIMARY_MINOR_NAME,
+ sizeof(DRM_PRIMARY_MINOR_NAME) - 1) == 0)
+ return DRM_NODE_PRIMARY;
+
+ if (strncmp(name, DRM_CONTROL_MINOR_NAME,
+ sizeof(DRM_CONTROL_MINOR_NAME) - 1) == 0)
+ return DRM_NODE_CONTROL;
+
+ if (strncmp(name, DRM_RENDER_MINOR_NAME,
+ sizeof(DRM_RENDER_MINOR_NAME) - 1) == 0)
+ return DRM_NODE_RENDER;
+
+ return -EINVAL;
+}
+
+static int drmGetMaxNodeName(void)
+{
+ return sizeof(DRM_DIR_NAME) +
+ MAX3(sizeof(DRM_PRIMARY_MINOR_NAME),
+ sizeof(DRM_CONTROL_MINOR_NAME),
+ sizeof(DRM_RENDER_MINOR_NAME)) +
+ 3 /* length of the node number */;
+}
+
+static int drmParsePciDeviceInfo(const char *d_name,
+ drmPciDeviceInfoPtr device)
+{
+#ifdef __linux__
+ char path[PATH_MAX + 1];
+ unsigned char config[64];
+ int fd, ret;
+
+ snprintf(path, PATH_MAX, "/sys/class/drm/%s/device/config", d_name);
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+
+ ret = read(fd, config, sizeof(config));
+ close(fd);
+ if (ret < 0)
+ return -errno;
+
+ device->vendor_id = config[0] | (config[1] << 8);
+ device->device_id = config[2] | (config[3] << 8);
+ device->revision_id = config[8];
+ device->subvendor_id = config[44] | (config[45] << 8);
+ device->subdevice_id = config[46] | (config[47] << 8);
+
+ return 0;
+#else
+#warning "Missing implementation of drmParsePciDeviceInfo"
+ return -EINVAL;
+#endif
+}
+
+void drmFreeDevice(drmDevicePtr *device)
+{
+ if (device == NULL)
+ return;
+
+ free(*device);
+ *device = NULL;
+}
+
+void drmFreeDevices(drmDevicePtr devices[], int count)
+{
+ int i;
+
+ if (devices == NULL)
+ return;
+
+ for (i = 0; i < count; i++)
+ if (devices[i])
+ drmFreeDevice(&devices[i]);
+}
+
+static int drmProcessPciDevice(drmDevicePtr *device, const char *d_name,
+ const char *node, int node_type,
+ int maj, int min, bool fetch_deviceinfo)
+{
+ const int max_node_str = ALIGN(drmGetMaxNodeName(), sizeof(void *));
+ int ret, i;
+ char *addr;
+
+ *device = calloc(1, sizeof(drmDevice) +
+ (DRM_NODE_MAX * (sizeof(void *) + max_node_str)) +
+ sizeof(drmPciBusInfo) +
+ sizeof(drmPciDeviceInfo));
+ if (!*device)
+ return -ENOMEM;
+
+ addr = (char*)*device;
+
+ (*device)->bustype = DRM_BUS_PCI;
+ (*device)->available_nodes = 1 << node_type;
+
+ addr += sizeof(drmDevice);
+ (*device)->nodes = (char**)addr;
+
+ addr += DRM_NODE_MAX * sizeof(void *);
+ for (i = 0; i < DRM_NODE_MAX; i++) {
+ (*device)->nodes[i] = addr;
+ addr += max_node_str;
+ }
+ memcpy((*device)->nodes[node_type], node, max_node_str);
+
+ (*device)->businfo.pci = (drmPciBusInfoPtr)addr;
+
+ ret = drmParsePciBusInfo(maj, min, (*device)->businfo.pci);
+ if (ret)
+ goto free_device;
+
+ /* Fetch the device info if the user has requested it */
+ if (fetch_deviceinfo) {
+ addr += sizeof(drmPciBusInfo);
+ (*device)->deviceinfo.pci = (drmPciDeviceInfoPtr)addr;
+
+ ret = drmParsePciDeviceInfo(d_name, (*device)->deviceinfo.pci);
+ if (ret)
+ goto free_device;
+ }
+ return 0;
+
+free_device:
+ free(*device);
+ *device = NULL;
+ return ret;
+}
+
+/* Consider devices located on the same bus as duplicates and fold the respective
+ * entries into a single one.
+ *
+ * Note: this leaves "gaps" in the array, while preserving the length.
+ */
+static void drmFoldDuplicatedDevices(drmDevicePtr local_devices[], int count)
+{
+ int node_type, i, j;
+
+ for (i = 0; i < count; i++) {
+ for (j = i + 1; j < count; j++) {
+ if (drmCompareBusInfo(local_devices[i], local_devices[j]) == 0) {
+ local_devices[i]->available_nodes |= local_devices[j]->available_nodes;
+ node_type = log2(local_devices[j]->available_nodes);
+ memcpy(local_devices[i]->nodes[node_type],
+ local_devices[j]->nodes[node_type], drmGetMaxNodeName());
+ drmFreeDevice(&local_devices[j]);
+ }
+ }
+ }
+}
+
+/**
+ * Get information about the opened drm device
+ *
+ * \param fd file descriptor of the drm device
+ * \param device the address of a drmDevicePtr where the information
+ * will be allocated and stored
+ *
+ * \return zero on success, negative error code otherwise.
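+ *
+ * \par Example
+ * A minimal sketch (error handling elided):
+ * \code
+ * drmDevicePtr dev;
+ *
+ * if (drmGetDevice(fd, &dev) == 0) {
+ *     if (dev->bustype == DRM_BUS_PCI)
+ *         printf("%04x:%04x\n", dev->deviceinfo.pci->vendor_id,
+ *                dev->deviceinfo.pci->device_id);
+ *     drmFreeDevice(&dev);
+ * }
+ * \endcode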
+ */
+int drmGetDevice(int fd, drmDevicePtr *device)
+{
+ drmDevicePtr *local_devices;
+ drmDevicePtr d;
+ DIR *sysdir;
+ struct dirent *dent;
+ struct stat sbuf;
+ char node[PATH_MAX + 1];
+ int node_type, subsystem_type;
+ int maj, min;
+ int ret, i, node_count;
+ int max_count = 16;
+ dev_t find_rdev;
+
+ if (fd == -1 || device == NULL)
+ return -EINVAL;
+
+ if (fstat(fd, &sbuf))
+ return -errno;
+
+ find_rdev = sbuf.st_rdev;
+ maj = major(sbuf.st_rdev);
+ min = minor(sbuf.st_rdev);
+
+ if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode))
+ return -EINVAL;
+
+ subsystem_type = drmParseSubsystemType(maj, min);
+
+ local_devices = calloc(max_count, sizeof(drmDevicePtr));
+ if (local_devices == NULL)
+ return -ENOMEM;
+
+ sysdir = opendir(DRM_DIR_NAME);
+ if (!sysdir) {
+ ret = -errno;
+ goto free_locals;
+ }
+
+ i = 0;
+ while ((dent = readdir(sysdir))) {
+ node_type = drmGetNodeType(dent->d_name);
+ if (node_type < 0)
+ continue;
+
+ snprintf(node, PATH_MAX, "%s/%s", DRM_DIR_NAME, dent->d_name);
+ if (stat(node, &sbuf))
+ continue;
+
+ maj = major(sbuf.st_rdev);
+ min = minor(sbuf.st_rdev);
+
+ if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode))
+ continue;
+
+ if (drmParseSubsystemType(maj, min) != subsystem_type)
+ continue;
+
+ switch (subsystem_type) {
+ case DRM_BUS_PCI:
+ ret = drmProcessPciDevice(&d, dent->d_name, node, node_type,
+ maj, min, true);
+ if (ret)
+ goto free_devices;
+
+ break;
+ default:
+ fprintf(stderr, "The subsystem type is not supported yet\n");
+ continue;
+ }
+
+ if (i >= max_count) {
+ drmDevicePtr *temp;
+
+ max_count += 16;
+ temp = realloc(local_devices, max_count * sizeof(drmDevicePtr));
+ if (!temp)
+ goto free_devices;
+ local_devices = temp;
+ }
+
+ /* store target at local_devices[0] for ease of use below */
+ if (find_rdev == sbuf.st_rdev && i) {
+ local_devices[i] = local_devices[0];
+ local_devices[0] = d;
+ }
+ else
+ local_devices[i] = d;
+ i++;
+ }
+ node_count = i;
+
+ drmFoldDuplicatedDevices(local_devices, node_count);
+
+ *device = local_devices[0];
+ drmFreeDevices(&local_devices[1], node_count - 1);
+
+ closedir(sysdir);
+ free(local_devices);
+ if (*device == NULL)
+ return -ENODEV;
+ return 0;
+
+free_devices:
+ drmFreeDevices(local_devices, i);
+ closedir(sysdir);
+
+free_locals:
+ free(local_devices);
+ return ret;
+}
+
+/**
+ * Get drm devices on the system
+ *
+ * \param devices the array of devices with drmDevicePtr elements;
+ * can be NULL to query the number of devices first
+ * \param max_devices the maximum number of devices for the array
+ *
+ * \return on error - negative error code,
+ * if devices is NULL - total number of devices available on the system,
+ * alternatively the number of devices stored in devices[], which is
+ * capped by max_devices.
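+ *
+ * \par Example
+ * The usual two-pass pattern (a sketch; error handling elided):
+ * \code
+ * int count = drmGetDevices(NULL, 0);
+ * drmDevicePtr *devs = calloc(count, sizeof(*devs));
+ *
+ * count = drmGetDevices(devs, count);
+ * // ... inspect devs[0] .. devs[count - 1] ...
+ * drmFreeDevices(devs, count);
+ * free(devs);
+ * \endcode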
+ */
+int drmGetDevices(drmDevicePtr devices[], int max_devices)
+{
+ drmDevicePtr *local_devices;
+ drmDevicePtr device;
+ DIR *sysdir;
+ struct dirent *dent;
+ struct stat sbuf;
+ char node[PATH_MAX + 1];
+ int node_type, subsystem_type;
+ int maj, min;
+ int ret, i, node_count, device_count;
+ int max_count = 16;
+
+ local_devices = calloc(max_count, sizeof(drmDevicePtr));
+ if (local_devices == NULL)
+ return -ENOMEM;
+
+ sysdir = opendir(DRM_DIR_NAME);
+ if (!sysdir) {
+ ret = -errno;
+ goto free_locals;
+ }
+
+ i = 0;
+ while ((dent = readdir(sysdir))) {
+ node_type = drmGetNodeType(dent->d_name);
+ if (node_type < 0)
+ continue;
+
+ snprintf(node, PATH_MAX, "%s/%s", DRM_DIR_NAME, dent->d_name);
+ if (stat(node, &sbuf))
+ continue;
+
+ maj = major(sbuf.st_rdev);
+ min = minor(sbuf.st_rdev);
+
+ if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode))
+ continue;
+
+ subsystem_type = drmParseSubsystemType(maj, min);
+
+ if (subsystem_type < 0)
+ continue;
+
+ switch (subsystem_type) {
+ case DRM_BUS_PCI:
+ ret = drmProcessPciDevice(&device, dent->d_name, node, node_type,
+ maj, min, devices != NULL);
+ if (ret)
+ goto free_devices;
+
+ break;
+ default:
+ fprintf(stderr, "The subsystem type is not supported yet\n");
+ continue;
+ }
+
+ if (i >= max_count) {
+ drmDevicePtr *temp;
+
+ max_count += 16;
+ temp = realloc(local_devices, max_count * sizeof(drmDevicePtr));
+ if (!temp)
+ goto free_devices;
+ local_devices = temp;
+ }
+
+ local_devices[i] = device;
+ i++;
+ }
+ node_count = i;
+
+ drmFoldDuplicatedDevices(local_devices, node_count);
+
+ device_count = 0;
+ for (i = 0; i < node_count; i++) {
+ if (!local_devices[i])
+ continue;
+
+ if ((devices != NULL) && (device_count < max_devices))
+ devices[device_count] = local_devices[i];
+ else
+ drmFreeDevice(&local_devices[i]);
+
+ device_count++;
+ }
+
+ closedir(sysdir);
+ free(local_devices);
+ return device_count;
+
+free_devices:
+ drmFreeDevices(local_devices, i);
+ closedir(sysdir);
+
+free_locals:
+ free(local_devices);
+ return ret;
+}
diff --git a/chromium/third_party/libdrm/src/xf86drm.h b/chromium/third_party/libdrm/src/xf86drm.h
new file mode 100644
index 00000000000..481d882aa01
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86drm.h
@@ -0,0 +1,803 @@
+/**
+ * \file xf86drm.h
+ * OS-independent header for DRM user-level library interface.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _XF86DRM_H_
+#define _XF86DRM_H_
+
+#include <stdarg.h>
+#include <sys/types.h>
+#include <stdint.h>
+#include <drm.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef DRM_MAX_MINOR
+#define DRM_MAX_MINOR 16
+#endif
+
+#if defined(__linux__)
+
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOC_VOID _IOC_NONE
+#define DRM_IOC_READ _IOC_READ
+#define DRM_IOC_WRITE _IOC_WRITE
+#define DRM_IOC_READWRITE (_IOC_READ|_IOC_WRITE)
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+
+#else /* One of the *BSDs */
+
+#include <sys/ioccom.h>
+#define DRM_IOCTL_NR(n) ((n) & 0xff)
+#define DRM_IOC_VOID IOC_VOID
+#define DRM_IOC_READ IOC_OUT
+#define DRM_IOC_WRITE IOC_IN
+#define DRM_IOC_READWRITE IOC_INOUT
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+
+#endif
+
+ /* Defaults, if nothing set in xf86config */
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+/* Default /dev/dri directory permissions 0755 */
+#define DRM_DEV_DIRMODE \
+ (S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+
+#ifdef __OpenBSD__
+#define DRM_DIR_NAME "/dev"
+#define DRM_DEV_NAME "%s/drm%d"
+#define DRM_CONTROL_DEV_NAME "%s/drmC%d"
+#define DRM_RENDER_DEV_NAME "%s/drmR%d"
+#else
+#define DRM_DIR_NAME "/dev/dri"
+#define DRM_DEV_NAME "%s/card%d"
+#define DRM_CONTROL_DEV_NAME "%s/controlD%d"
+#define DRM_RENDER_DEV_NAME "%s/renderD%d"
+#define DRM_PROC_NAME "/proc/dri/" /* For backward Linux compatibility */
+#endif
+
+#define DRM_ERR_NO_DEVICE (-1001)
+#define DRM_ERR_NO_ACCESS (-1002)
+#define DRM_ERR_NOT_ROOT (-1003)
+#define DRM_ERR_INVALID (-1004)
+#define DRM_ERR_NO_FD (-1005)
+
+#define DRM_AGP_NO_HANDLE 0
+
+typedef unsigned int drmSize, *drmSizePtr; /**< For mapped regions */
+typedef void *drmAddress, **drmAddressPtr; /**< For mapped regions */
+
+#if (__GNUC__ >= 3)
+#define DRM_PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
+#else
+#define DRM_PRINTFLIKE(f, a)
+#endif
+
+typedef struct _drmServerInfo {
+ int (*debug_print)(const char *format, va_list ap) DRM_PRINTFLIKE(1,0);
+ int (*load_module)(const char *name);
+ void (*get_perms)(gid_t *, mode_t *);
+} drmServerInfo, *drmServerInfoPtr;
+
+typedef struct drmHashEntry {
+ int fd;
+ void (*f)(int, void *, void *);
+ void *tagTable;
+} drmHashEntry;
+
+extern int drmIoctl(int fd, unsigned long request, void *arg);
+extern void *drmGetHashTable(void);
+extern drmHashEntry *drmGetEntry(int fd);
+
+/**
+ * Driver version information.
+ *
+ * \sa drmGetVersion() and drmSetVersion().
+ */
+typedef struct _drmVersion {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ int name_len; /**< Length of name buffer */
+ char *name; /**< Name of driver */
+ int date_len; /**< Length of date buffer */
+ char *date; /**< User-space buffer to hold date */
+ int desc_len; /**< Length of desc buffer */
+ char *desc; /**< User-space buffer to hold desc */
+} drmVersion, *drmVersionPtr;
+
+typedef struct _drmStats {
+ unsigned long count; /**< Number of data entries */
+ struct {
+ unsigned long value; /**< Value from kernel */
+ const char *long_format; /**< Suggested format for long_name */
+ const char *long_name; /**< Long name for value */
+ const char *rate_format; /**< Suggested format for rate_name */
+ const char *rate_name; /**< Short name for value per second */
+ int isvalue; /**< True if value (vs. counter) */
+ const char *mult_names; /**< Multiplier names (e.g., "KGM") */
+ int mult; /**< Multiplier value (e.g., 1024) */
+ int verbose; /**< Suggest only in verbose output */
+ } data[15];
+} drmStatsT;
+
+
+ /* All of these enums *MUST* match with the
+ kernel implementation -- so do *NOT*
+ change them! (The drmlib implementation
+ will just copy the flags instead of
+ translating them.) */
+typedef enum {
+ DRM_FRAME_BUFFER = 0, /**< WC, no caching, no core dump */
+ DRM_REGISTERS = 1, /**< no caching, no core dump */
+ DRM_SHM = 2, /**< shared, cached */
+ DRM_AGP = 3, /**< AGP/GART */
+ DRM_SCATTER_GATHER = 4, /**< PCI scatter/gather */
+ DRM_CONSISTENT = 5 /**< PCI consistent */
+} drmMapType;
+
+typedef enum {
+ DRM_RESTRICTED = 0x0001, /**< Cannot be mapped to client-virtual */
+ DRM_READ_ONLY = 0x0002, /**< Read-only in client-virtual */
+ DRM_LOCKED = 0x0004, /**< Physical pages locked */
+ DRM_KERNEL = 0x0008, /**< Kernel requires access */
+ DRM_WRITE_COMBINING = 0x0010, /**< Use write-combining, if available */
+ DRM_CONTAINS_LOCK = 0x0020, /**< SHM page that contains lock */
+ DRM_REMOVABLE = 0x0040 /**< Removable mapping */
+} drmMapFlags;
+
+/**
+ * \warning These values *MUST* match drm.h
+ */
+typedef enum {
+ /** \name Flags for DMA buffer dispatch */
+ /*@{*/
+ DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note the buffer may not yet have been
+ * processed by the hardware -- getting a
+ * hardware lock with the hardware quiescent
+ * will ensure that the buffer has been
+ * processed.
+ */
+ DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+ /*@}*/
+
+ /** \name Flags for DMA buffer request */
+ /*@{*/
+ DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+ /*@}*/
+} drmDMAFlags;
+
+typedef enum {
+ DRM_PAGE_ALIGN = 0x01,
+ DRM_AGP_BUFFER = 0x02,
+ DRM_SG_BUFFER = 0x04,
+ DRM_FB_BUFFER = 0x08,
+ DRM_PCI_BUFFER_RO = 0x10
+} drmBufDescFlags;
+
+typedef enum {
+ DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+} drmLockFlags;
+
+typedef enum {
+ DRM_CONTEXT_PRESERVED = 0x01, /**< This context is preserved and
+ never swapped. */
+ DRM_CONTEXT_2DONLY = 0x02 /**< This context is for 2D rendering only. */
+} drm_context_tFlags, *drm_context_tFlagsPtr;
+
+typedef struct _drmBufDesc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+} drmBufDesc, *drmBufDescPtr;
+
+typedef struct _drmBufInfo {
+ int count; /**< Number of buffers described in list */
+ drmBufDescPtr list; /**< List of buffer descriptions */
+} drmBufInfo, *drmBufInfoPtr;
+
+typedef struct _drmBuf {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ drmAddress address; /**< Address */
+} drmBuf, *drmBufPtr;
+
+/**
+ * Buffer mapping information.
+ *
+ * Used by drmMapBufs() and drmUnmapBufs() to store information about the
+ * mapped buffers.
+ */
+typedef struct _drmBufMap {
+ int count; /**< Number of buffers mapped */
+ drmBufPtr list; /**< Buffers */
+} drmBufMap, *drmBufMapPtr;
+
+typedef struct _drmLock {
+ volatile unsigned int lock;
+ char padding[60];
+ /* This is big enough for most current (and future?) architectures:
+ DEC Alpha: 32 bytes
+ Intel Merced: ?
+ Intel P5/PPro/PII/PIII: 32 bytes
+ Intel StrongARM: 32 bytes
+ Intel i386/i486: 16 bytes
+ MIPS: 32 bytes (?)
+ Motorola 68k: 16 bytes
+ Motorola PowerPC: 32 bytes
+ Sun SPARC: 32 bytes
+ */
+} drmLock, *drmLockPtr;
+
+/**
+ * Indices here refer to the offset into
+ * list in drmBufInfo
+ */
+typedef struct _drmDMAReq {
+ drm_context_t context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int *send_list; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send, in bytes */
+ drmDMAFlags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size of buffers requested */
+ int *request_list; /**< Buffer information */
+ int *request_sizes; /**< Minimum acceptable sizes */
+ int granted_count; /**< Number of buffers granted at this size */
+} drmDMAReq, *drmDMAReqPtr;
+
+typedef struct _drmRegion {
+ drm_handle_t handle;
+ unsigned int offset;
+ drmSize size;
+ drmAddress map;
+} drmRegion, *drmRegionPtr;
+
+typedef struct _drmTextureRegion {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding; /**< Explicitly pad this out */
+ unsigned int age;
+} drmTextureRegion, *drmTextureRegionPtr;
+
+
+typedef enum {
+ DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ /* bits 1-6 are reserved for high crtcs */
+ DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
+ DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
+} drmVBlankSeqType;
+#define DRM_VBLANK_HIGH_CRTC_SHIFT 1
+
+typedef struct _drmVBlankReq {
+ drmVBlankSeqType type;
+ unsigned int sequence;
+ unsigned long signal;
+} drmVBlankReq, *drmVBlankReqPtr;
+
+typedef struct _drmVBlankReply {
+ drmVBlankSeqType type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+} drmVBlankReply, *drmVBlankReplyPtr;
+
+typedef union _drmVBlank {
+ drmVBlankReq request;
+ drmVBlankReply reply;
+} drmVBlank, *drmVBlankPtr;
+
+typedef struct _drmSetVersion {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+} drmSetVersion, *drmSetVersionPtr;
+
+#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
+
+#define DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+
+#if defined(__GNUC__) && (__GNUC__ >= 2)
+# if defined(__i386) || defined(__AMD64__) || defined(__x86_64__) || defined(__amd64__)
+ /* Reflect changes here to drmP.h */
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ int __dummy; /* Can't mark eax as clobbered */ \
+ __asm__ __volatile__( \
+ "lock ; cmpxchg %4,%1\n\t" \
+ "setnz %0" \
+ : "=d" (__ret), \
+ "=m" (__drm_dummy_lock(lock)), \
+ "=a" (__dummy) \
+ : "2" (old), \
+ "r" (new)); \
+ } while (0)
+
+#elif defined(__alpha__)
+
+#define DRM_CAS(lock, old, new, ret) \
+ do { \
+ int tmp, old32; \
+ __asm__ __volatile__( \
+ " addl $31, %5, %3\n" \
+ "1: ldl_l %0, %2\n" \
+ " cmpeq %0, %3, %1\n" \
+ " beq %1, 2f\n" \
+ " mov %4, %0\n" \
+ " stl_c %0, %2\n" \
+ " beq %0, 3f\n" \
+ " mb\n" \
+ "2: cmpeq %1, 0, %1\n" \
+ ".subsection 2\n" \
+ "3: br 1b\n" \
+ ".previous" \
+ : "=&r"(tmp), "=&r"(ret), \
+ "=m"(__drm_dummy_lock(lock)), \
+ "=&r"(old32) \
+ : "r"(new), "r"(old) \
+ : "memory"); \
+ } while (0)
+
+#elif defined(__sparc__)
+
+#define DRM_CAS(lock,old,new,__ret) \
+do { register unsigned int __old __asm("o0"); \
+ register unsigned int __new __asm("o1"); \
+ register volatile unsigned int *__lock __asm("o2"); \
+ __old = old; \
+ __new = new; \
+ __lock = (volatile unsigned int *)lock; \
+ __asm__ __volatile__( \
+ /*"cas [%2], %3, %0"*/ \
+ ".word 0xd3e29008\n\t" \
+ /*"membar #StoreStore | #StoreLoad"*/ \
+ ".word 0x8143e00a" \
+ : "=&r" (__new) \
+ : "0" (__new), \
+ "r" (__lock), \
+ "r" (__old) \
+ : "memory"); \
+ __ret = (__new != __old); \
+} while(0)
+
+#elif defined(__ia64__)
+
+#ifdef __INTEL_COMPILER
+/* this currently generates bad code (missing stop bits)... */
+#include <ia64intrin.h>
+
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ unsigned long __result, __old = (old) & 0xffffffff; \
+ __mf(); \
+ __result = _InterlockedCompareExchange_acq(&__drm_dummy_lock(lock), (new), __old);\
+ __ret = (__result) != (__old); \
+/* __ret = (__sync_val_compare_and_swap(&__drm_dummy_lock(lock), \
+ (old), (new)) \
+ != (old)); */\
+ } while (0)
+
+#else
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ unsigned int __result, __old = (old); \
+ __asm__ __volatile__( \
+ "mf\n" \
+ "mov ar.ccv=%2\n" \
+ ";;\n" \
+ "cmpxchg4.acq %0=%1,%3,ar.ccv" \
+ : "=r" (__result), "=m" (__drm_dummy_lock(lock)) \
+ : "r" ((unsigned long)__old), "r" (new) \
+ : "memory"); \
+ __ret = (__result) != (__old); \
+ } while (0)
+
+#endif
+
+#elif defined(__powerpc__)
+
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ __asm__ __volatile__( \
+ "sync;" \
+ "0: lwarx %0,0,%1;" \
+ " xor. %0,%3,%0;" \
+ " bne 1f;" \
+ " stwcx. %2,0,%1;" \
+ " bne- 0b;" \
+ "1: " \
+ "sync;" \
+ : "=&r"(__ret) \
+ : "r"(lock), "r"(new), "r"(old) \
+ : "cr0", "memory"); \
+ } while (0)
+
+#endif /* architecture */
+#endif /* __GNUC__ >= 2 */
+
+#ifndef DRM_CAS
+#define DRM_CAS(lock,old,new,ret) do { ret=1; } while (0) /* FAST LOCK FAILS */
+#endif
+
+#if defined(__alpha__)
+#define DRM_CAS_RESULT(_result) long _result
+#elif defined(__powerpc__)
+#define DRM_CAS_RESULT(_result) int _result
+#else
+#define DRM_CAS_RESULT(_result) char _result
+#endif
+
+#define DRM_LIGHT_LOCK(fd,lock,context) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
+ if (__ret) drmGetLock(fd,context,0); \
+ } while(0)
+
+ /* This one counts fast locks -- for
+ benchmarking only. */
+#define DRM_LIGHT_LOCK_COUNT(fd,lock,context,count) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
+ if (__ret) drmGetLock(fd,context,0); \
+ else ++count; \
+ } while(0)
+
+#define DRM_LOCK(fd,lock,context,flags) \
+ do { \
+ if (flags) drmGetLock(fd,context,flags); \
+ else DRM_LIGHT_LOCK(fd,lock,context); \
+ } while(0)
+
+#define DRM_UNLOCK(fd,lock,context) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ DRM_CAS(lock,DRM_LOCK_HELD|context,context,__ret); \
+ if (__ret) drmUnlock(fd,context); \
+ } while(0)
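+
+  /* Illustrative usage (a sketch, not part of the original header; fd,
+     hw_lock and ctx are assumed to come from the caller -- hw_lock being
+     the drmLock mapped from the shared SAREA, ctx a drm_context_t):
+
+         DRM_LOCK(fd, hw_lock, ctx, 0);
+         (touch the hardware here)
+         DRM_UNLOCK(fd, hw_lock, ctx);
+
+     DRM_LOCK() tries the DRM_CAS() fast path first and falls back to the
+     drmGetLock() ioctl on contention; DRM_UNLOCK() only issues drmUnlock()
+     if the fast release fails, i.e. if the lock was contended. */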
+
+ /* Simple spin locks */
+#define DRM_SPINLOCK(spin,val) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ do { \
+ DRM_CAS(spin,0,val,__ret); \
+ if (__ret) while ((spin)->lock); \
+ } while (__ret); \
+ } while(0)
+
+#define DRM_SPINLOCK_TAKE(spin,val) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ int cur; \
+ do { \
+ cur = (*spin).lock; \
+ DRM_CAS(spin,cur,val,__ret); \
+ } while (__ret); \
+ } while(0)
+
+#define DRM_SPINLOCK_COUNT(spin,val,count,__ret) \
+ do { \
+ int __i; \
+ __ret = 1; \
+ for (__i = 0; __ret && __i < count; __i++) { \
+ DRM_CAS(spin,0,val,__ret); \
+ if (__ret) for (;__i < count && (spin)->lock; __i++); \
+ } \
+ } while(0)
+
+#define DRM_SPINUNLOCK(spin,val) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ if ((*spin).lock == val) { /* else server stole lock */ \
+ do { \
+ DRM_CAS(spin,val,0,__ret); \
+ } while (__ret); \
+ } \
+ } while(0)
+
+
+
+/* General user-level programmer's API: unprivileged */
+extern int drmAvailable(void);
+extern int drmOpen(const char *name, const char *busid);
+
+#define DRM_NODE_PRIMARY 0
+#define DRM_NODE_CONTROL 1
+#define DRM_NODE_RENDER 2
+#define DRM_NODE_MAX 3
+
+extern int drmOpenWithType(const char *name, const char *busid,
+ int type);
+
+extern int drmOpenControl(int minor);
+extern int drmOpenRender(int minor);
+extern int drmClose(int fd);
+extern drmVersionPtr drmGetVersion(int fd);
+extern drmVersionPtr drmGetLibVersion(int fd);
+extern int drmGetCap(int fd, uint64_t capability, uint64_t *value);
+extern void drmFreeVersion(drmVersionPtr);
+extern int drmGetMagic(int fd, drm_magic_t * magic);
+extern char *drmGetBusid(int fd);
+extern int drmGetInterruptFromBusID(int fd, int busnum, int devnum,
+ int funcnum);
+extern int drmGetMap(int fd, int idx, drm_handle_t *offset,
+ drmSize *size, drmMapType *type,
+ drmMapFlags *flags, drm_handle_t *handle,
+ int *mtrr);
+extern int drmGetClient(int fd, int idx, int *auth, int *pid,
+ int *uid, unsigned long *magic,
+ unsigned long *iocs);
+extern int drmGetStats(int fd, drmStatsT *stats);
+extern int drmSetInterfaceVersion(int fd, drmSetVersion *version);
+extern int drmCommandNone(int fd, unsigned long drmCommandIndex);
+extern int drmCommandRead(int fd, unsigned long drmCommandIndex,
+ void *data, unsigned long size);
+extern int drmCommandWrite(int fd, unsigned long drmCommandIndex,
+ void *data, unsigned long size);
+extern int drmCommandWriteRead(int fd, unsigned long drmCommandIndex,
+ void *data, unsigned long size);
+
+/* General user-level programmer's API: X server (root) only */
+extern void drmFreeBusid(const char *busid);
+extern int drmSetBusid(int fd, const char *busid);
+extern int drmAuthMagic(int fd, drm_magic_t magic);
+extern int drmAddMap(int fd,
+ drm_handle_t offset,
+ drmSize size,
+ drmMapType type,
+ drmMapFlags flags,
+ drm_handle_t * handle);
+extern int drmRmMap(int fd, drm_handle_t handle);
+extern int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
+ drm_handle_t handle);
+
+extern int drmAddBufs(int fd, int count, int size,
+ drmBufDescFlags flags,
+ int agp_offset);
+extern int drmMarkBufs(int fd, double low, double high);
+extern int drmCreateContext(int fd, drm_context_t * handle);
+extern int drmSetContextFlags(int fd, drm_context_t context,
+ drm_context_tFlags flags);
+extern int drmGetContextFlags(int fd, drm_context_t context,
+ drm_context_tFlagsPtr flags);
+extern int drmAddContextTag(int fd, drm_context_t context, void *tag);
+extern int drmDelContextTag(int fd, drm_context_t context);
+extern void *drmGetContextTag(int fd, drm_context_t context);
+extern drm_context_t * drmGetReservedContextList(int fd, int *count);
+extern void drmFreeReservedContextList(drm_context_t *);
+extern int drmSwitchToContext(int fd, drm_context_t context);
+extern int drmDestroyContext(int fd, drm_context_t handle);
+extern int drmCreateDrawable(int fd, drm_drawable_t * handle);
+extern int drmDestroyDrawable(int fd, drm_drawable_t handle);
+extern int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
+ drm_drawable_info_type_t type,
+ unsigned int num, void *data);
+extern int drmCtlInstHandler(int fd, int irq);
+extern int drmCtlUninstHandler(int fd);
+extern int drmSetClientCap(int fd, uint64_t capability,
+ uint64_t value);
+
+/* General user-level programmer's API: authenticated client and/or X */
+extern int drmMap(int fd,
+ drm_handle_t handle,
+ drmSize size,
+ drmAddressPtr address);
+extern int drmUnmap(drmAddress address, drmSize size);
+extern drmBufInfoPtr drmGetBufInfo(int fd);
+extern drmBufMapPtr drmMapBufs(int fd);
+extern int drmUnmapBufs(drmBufMapPtr bufs);
+extern int drmDMA(int fd, drmDMAReqPtr request);
+extern int drmFreeBufs(int fd, int count, int *list);
+extern int drmGetLock(int fd,
+ drm_context_t context,
+ drmLockFlags flags);
+extern int drmUnlock(int fd, drm_context_t context);
+extern int drmFinish(int fd, int context, drmLockFlags flags);
+extern int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
+ drm_handle_t * handle);
+
+/* AGP/GART support: X server (root) only */
+extern int drmAgpAcquire(int fd);
+extern int drmAgpRelease(int fd);
+extern int drmAgpEnable(int fd, unsigned long mode);
+extern int drmAgpAlloc(int fd, unsigned long size,
+ unsigned long type, unsigned long *address,
+ drm_handle_t *handle);
+extern int drmAgpFree(int fd, drm_handle_t handle);
+extern int drmAgpBind(int fd, drm_handle_t handle,
+ unsigned long offset);
+extern int drmAgpUnbind(int fd, drm_handle_t handle);
+
+/* AGP/GART info: authenticated client and/or X */
+extern int drmAgpVersionMajor(int fd);
+extern int drmAgpVersionMinor(int fd);
+extern unsigned long drmAgpGetMode(int fd);
+extern unsigned long drmAgpBase(int fd); /* Physical location */
+extern unsigned long drmAgpSize(int fd); /* Bytes */
+extern unsigned long drmAgpMemoryUsed(int fd);
+extern unsigned long drmAgpMemoryAvail(int fd);
+extern unsigned int drmAgpVendorId(int fd);
+extern unsigned int drmAgpDeviceId(int fd);
+
+/* PCI scatter/gather support: X server (root) only */
+extern int drmScatterGatherAlloc(int fd, unsigned long size,
+ drm_handle_t *handle);
+extern int drmScatterGatherFree(int fd, drm_handle_t handle);
+
+extern int drmWaitVBlank(int fd, drmVBlankPtr vbl);
+
+/* Support routines */
+extern void drmSetServerInfo(drmServerInfoPtr info);
+extern int drmError(int err, const char *label);
+extern void *drmMalloc(int size);
+extern void drmFree(void *pt);
+
+/* Hash table routines */
+extern void *drmHashCreate(void);
+extern int drmHashDestroy(void *t);
+extern int drmHashLookup(void *t, unsigned long key, void **value);
+extern int drmHashInsert(void *t, unsigned long key, void *value);
+extern int drmHashDelete(void *t, unsigned long key);
+extern int drmHashFirst(void *t, unsigned long *key, void **value);
+extern int drmHashNext(void *t, unsigned long *key, void **value);
+
+/* PRNG routines */
+extern void *drmRandomCreate(unsigned long seed);
+extern int drmRandomDestroy(void *state);
+extern unsigned long drmRandom(void *state);
+extern double drmRandomDouble(void *state);
+
+/* Skip list routines */
+
+extern void *drmSLCreate(void);
+extern int drmSLDestroy(void *l);
+extern int drmSLLookup(void *l, unsigned long key, void **value);
+extern int drmSLInsert(void *l, unsigned long key, void *value);
+extern int drmSLDelete(void *l, unsigned long key);
+extern int drmSLNext(void *l, unsigned long *key, void **value);
+extern int drmSLFirst(void *l, unsigned long *key, void **value);
+extern void drmSLDump(void *l);
+extern int drmSLLookupNeighbors(void *l, unsigned long key,
+ unsigned long *prev_key, void **prev_value,
+ unsigned long *next_key, void **next_value);
+
+extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
+extern int drmOpenOnceWithType(const char *BusID, int *newlyopened, int type);
+extern void drmCloseOnce(int fd);
+extern void drmMsg(const char *format, ...) DRM_PRINTFLIKE(1, 2);
+
+extern int drmSetMaster(int fd);
+extern int drmDropMaster(int fd);
+
+#define DRM_EVENT_CONTEXT_VERSION 2
+
+typedef struct _drmEventContext {
+
+ /* This struct is versioned so we can add more pointers if we
+ * add more events. */
+ int version;
+
+ void (*vblank_handler)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ void *user_data);
+
+ void (*page_flip_handler)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ void *user_data);
+
+} drmEventContext, *drmEventContextPtr;
+
+extern int drmHandleEvent(int fd, drmEventContextPtr evctx);
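+
+/*
+ * Illustrative initialization (a sketch, not part of the original header;
+ * my_vblank_handler is an assumed user-supplied callback):
+ *
+ *     drmEventContext evctx;
+ *
+ *     memset(&evctx, 0, sizeof(evctx));
+ *     evctx.version = DRM_EVENT_CONTEXT_VERSION;
+ *     evctx.vblank_handler = my_vblank_handler;
+ *     drmHandleEvent(fd, &evctx);
+ */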
+
+extern char *drmGetDeviceNameFromFd(int fd);
+extern int drmGetNodeTypeFromFd(int fd);
+
+extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd);
+extern int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle);
+
+extern char *drmGetPrimaryDeviceNameFromFd(int fd);
+extern char *drmGetRenderDeviceNameFromFd(int fd);
+
+#define DRM_BUS_PCI 0
+
+typedef struct _drmPciBusInfo {
+ uint16_t domain;
+ uint8_t bus;
+ uint8_t dev;
+ uint8_t func;
+} drmPciBusInfo, *drmPciBusInfoPtr;
+
+typedef struct _drmPciDeviceInfo {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t subvendor_id;
+ uint16_t subdevice_id;
+ uint8_t revision_id;
+} drmPciDeviceInfo, *drmPciDeviceInfoPtr;
+
+typedef struct _drmDevice {
+ char **nodes; /* DRM_NODE_MAX sized array */
+ int available_nodes; /* DRM_NODE_* bitmask */
+ int bustype;
+ union {
+ drmPciBusInfoPtr pci;
+ } businfo;
+ union {
+ drmPciDeviceInfoPtr pci;
+ } deviceinfo;
+} drmDevice, *drmDevicePtr;
+
+extern int drmGetDevice(int fd, drmDevicePtr *device);
+extern void drmFreeDevice(drmDevicePtr *device);
+
+extern int drmGetDevices(drmDevicePtr devices[], int max_devices);
+extern void drmFreeDevices(drmDevicePtr devices[], int count);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/chromium/third_party/libdrm/src/xf86drmHash.c b/chromium/third_party/libdrm/src/xf86drmHash.c
new file mode 100644
index 00000000000..f287e61fb2b
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86drmHash.c
@@ -0,0 +1,253 @@
+/* xf86drmHash.c -- Small hash table support for integer -> integer mapping
+ * Created: Sun Apr 18 09:35:45 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * DESCRIPTION
+ *
+ * This file contains a straightforward implementation of a fixed-sized
+ * hash table using self-organizing linked lists [Knuth73, pp. 398-399] for
+ * collision resolution. There are two potentially interesting things
+ * about this implementation:
+ *
+ * 1) The table is power-of-two sized. Prime sized tables are more
+ * traditional, but do not have a significant advantage over power-of-two
+ * sized tables, especially when double hashing is not used for collision
+ * resolution.
+ *
+ * 2) The hash computation uses a table of random integers [Hanson97,
+ * pp. 39-41].
+ *
+ * FUTURE ENHANCEMENTS
+ *
+ * With a table size of 512, the current implementation is sufficient for a
+ * few hundred keys. Since this is well above the expected size of the
+ * tables for which this implementation was designed, the implementation of
+ * dynamic hash tables was postponed until the need arises. A common (and
+ * naive) approach to dynamic hash table implementation simply creates a
+ * new hash table when necessary, rehashes all the data into the new table,
+ * and destroys the old table. The approach in [Larson88] is superior in
+ * two ways: 1) only a portion of the table is expanded when needed,
+ * distributing the expansion cost over several insertions, and 2) portions
+ * of the table can be locked, enabling a scalable thread-safe
+ * implementation.
+ *
+ * REFERENCES
+ *
+ * [Hanson97] David R. Hanson. C Interfaces and Implementations:
+ * Techniques for Creating Reusable Software. Reading, Massachusetts:
+ * Addison-Wesley, 1997.
+ *
+ * [Knuth73] Donald E. Knuth. The Art of Computer Programming. Volume 3:
+ * Sorting and Searching. Reading, Massachusetts: Addison-Wesley, 1973.
+ *
+ * [Larson88] Per-Ake Larson. "Dynamic Hash Tables". CACM 31(4), April
+ * 1988, pp. 446-457.
+ *
+ */
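+
+/*
+ * Illustrative usage (a sketch, not part of the original file; the key
+ * and value below are made up):
+ *
+ *     void *t = drmHashCreate();
+ *     void *value;
+ *
+ *     drmHashInsert(t, 0x1234, some_pointer);     returns 0 on success,
+ *                                                 1 if the key exists
+ *     if (drmHashLookup(t, 0x1234, &value) == 0)
+ *         (value now holds some_pointer)
+ *     drmHashDestroy(t);
+ */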
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "xf86drm.h"
+#include "xf86drmHash.h"
+
+#define HASH_MAGIC 0xdeadbeef
+
+static unsigned long HashHash(unsigned long key)
+{
+ unsigned long hash = 0;
+ unsigned long tmp = key;
+ static int init = 0;
+ static unsigned long scatter[256];
+ int i;
+
+ if (!init) {
+ void *state;
+ state = drmRandomCreate(37);
+ for (i = 0; i < 256; i++) scatter[i] = drmRandom(state);
+ drmRandomDestroy(state);
+ ++init;
+ }
+
+ while (tmp) {
+ hash = (hash << 1) + scatter[tmp & 0xff];
+ tmp >>= 8;
+ }
+
+ hash %= HASH_SIZE;
+#if DEBUG
+ printf( "Hash(%lu) = %lu\n", key, hash);
+#endif
+ return hash;
+}
+
+void *drmHashCreate(void)
+{
+ HashTablePtr table;
+ int i;
+
+ table = drmMalloc(sizeof(*table));
+ if (!table) return NULL;
+ table->magic = HASH_MAGIC;
+ table->entries = 0;
+ table->hits = 0;
+ table->partials = 0;
+ table->misses = 0;
+
+ for (i = 0; i < HASH_SIZE; i++) table->buckets[i] = NULL;
+ return table;
+}
+
+int drmHashDestroy(void *t)
+{
+ HashTablePtr table = (HashTablePtr)t;
+ HashBucketPtr bucket;
+ HashBucketPtr next;
+ int i;
+
+ if (table->magic != HASH_MAGIC) return -1; /* Bad magic */
+
+ for (i = 0; i < HASH_SIZE; i++) {
+ for (bucket = table->buckets[i]; bucket;) {
+ next = bucket->next;
+ drmFree(bucket);
+ bucket = next;
+ }
+ }
+ drmFree(table);
+ return 0;
+}
+
+/* Find the bucket and organize the list so that this bucket is at the
+ top. */
+
+static HashBucketPtr HashFind(HashTablePtr table,
+ unsigned long key, unsigned long *h)
+{
+ unsigned long hash = HashHash(key);
+ HashBucketPtr prev = NULL;
+ HashBucketPtr bucket;
+
+ if (h) *h = hash;
+
+ for (bucket = table->buckets[hash]; bucket; bucket = bucket->next) {
+ if (bucket->key == key) {
+ if (prev) {
+ /* Organize */
+ prev->next = bucket->next;
+ bucket->next = table->buckets[hash];
+ table->buckets[hash] = bucket;
+ ++table->partials;
+ } else {
+ ++table->hits;
+ }
+ return bucket;
+ }
+ prev = bucket;
+ }
+ ++table->misses;
+ return NULL;
+}
+
+int drmHashLookup(void *t, unsigned long key, void **value)
+{
+ HashTablePtr table = (HashTablePtr)t;
+ HashBucketPtr bucket;
+
+ if (!table || table->magic != HASH_MAGIC) return -1; /* Bad magic */
+
+ bucket = HashFind(table, key, NULL);
+ if (!bucket) return 1; /* Not found */
+ *value = bucket->value;
+ return 0; /* Found */
+}
+
+int drmHashInsert(void *t, unsigned long key, void *value)
+{
+ HashTablePtr table = (HashTablePtr)t;
+ HashBucketPtr bucket;
+ unsigned long hash;
+
+ if (table->magic != HASH_MAGIC) return -1; /* Bad magic */
+
+ if (HashFind(table, key, &hash)) return 1; /* Already in table */
+
+ bucket = drmMalloc(sizeof(*bucket));
+ if (!bucket) return -1; /* Error */
+ bucket->key = key;
+ bucket->value = value;
+ bucket->next = table->buckets[hash];
+ table->buckets[hash] = bucket;
+#if DEBUG
+ printf("Inserted %lu at %lu/%p\n", key, hash, bucket);
+#endif
+ return 0; /* Added to table */
+}
+
+int drmHashDelete(void *t, unsigned long key)
+{
+ HashTablePtr table = (HashTablePtr)t;
+ unsigned long hash;
+ HashBucketPtr bucket;
+
+ if (table->magic != HASH_MAGIC) return -1; /* Bad magic */
+
+ bucket = HashFind(table, key, &hash);
+
+ if (!bucket) return 1; /* Not found */
+
+ table->buckets[hash] = bucket->next;
+ drmFree(bucket);
+ return 0;
+}
+
+int drmHashNext(void *t, unsigned long *key, void **value)
+{
+ HashTablePtr table = (HashTablePtr)t;
+
+ while (table->p0 < HASH_SIZE) {
+ if (table->p1) {
+ *key = table->p1->key;
+ *value = table->p1->value;
+ table->p1 = table->p1->next;
+ return 1;
+ }
+ table->p1 = table->buckets[table->p0];
+ ++table->p0;
+ }
+ return 0;
+}
+
+int drmHashFirst(void *t, unsigned long *key, void **value)
+{
+ HashTablePtr table = (HashTablePtr)t;
+
+ if (table->magic != HASH_MAGIC) return -1; /* Bad magic */
+
+ table->p0 = 0;
+ table->p1 = table->buckets[0];
+ return drmHashNext(table, key, value);
+}
diff --git a/chromium/third_party/libdrm/src/xf86drmHash.h b/chromium/third_party/libdrm/src/xf86drmHash.h
new file mode 100644
index 00000000000..38fd84b7046
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86drmHash.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+#define HASH_SIZE 512 /* Good for about 100 entries */
+ /* If you change this value, you probably
+ have to change the HashHash hashing
+ function! */
+
+typedef struct HashBucket {
+ unsigned long key;
+ void *value;
+ struct HashBucket *next;
+} HashBucket, *HashBucketPtr;
+
+typedef struct HashTable {
+ unsigned long magic;
+ unsigned long entries;
+ unsigned long hits; /* At top of linked list */
+ unsigned long partials; /* Not at top of linked list */
+ unsigned long misses; /* Not in table */
+ HashBucketPtr buckets[HASH_SIZE];
+ int p0;
+ HashBucketPtr p1;
+} HashTable, *HashTablePtr;
diff --git a/chromium/third_party/libdrm/src/xf86drmMode.c b/chromium/third_party/libdrm/src/xf86drmMode.c
new file mode 100644
index 00000000000..aaf259b7b1d
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86drmMode.c
@@ -0,0 +1,1502 @@
+/*
+ * \file xf86drmMode.c
+ * Implementation of the DRM modesetting interface.
+ *
+ * \author Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * \par Acknowledgements:
+ * Feb 2007, Dave Airlie <airlied@linux.ie>
+ */
+
+/*
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2007-2008 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * TODO: the types we are after are defined in different headers on different
+ * platforms; find out which headers to include to get uint32_t.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#ifdef HAVE_SYS_SYSCTL_H
+#include <sys/sysctl.h>
+#endif
+#include <stdio.h>
+#include <stdbool.h>
+
+#include "xf86drmMode.h"
+#include "xf86drm.h"
+#include <drm.h>
+#include <string.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <errno.h>
+
+#define memclear(s) memset(&s, 0, sizeof(s))
+
+#define U642VOID(x) ((void *)(unsigned long)(x))
+#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
+
+static inline int DRM_IOCTL(int fd, unsigned long cmd, void *arg)
+{
+ int ret = drmIoctl(fd, cmd, arg);
+ return ret < 0 ? -errno : ret;
+}
+
+/*
+ * Util functions
+ */
+
+static void* drmAllocCpy(char *array, int count, int entry_size)
+{
+ char *r;
+ int i;
+
+ if (!count || !array || !entry_size)
+ return 0;
+
+ if (!(r = drmMalloc(count*entry_size)))
+ return 0;
+
+ for (i = 0; i < count; i++)
+ memcpy(r+(entry_size*i), array+(entry_size*i), entry_size);
+
+ return r;
+}
+
+/*
+ * A couple of free functions.
+ */
+
+void drmModeFreeModeInfo(drmModeModeInfoPtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr);
+}
+
+void drmModeFreeResources(drmModeResPtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr->fbs);
+ drmFree(ptr->crtcs);
+ drmFree(ptr->connectors);
+ drmFree(ptr->encoders);
+ drmFree(ptr);
+}
+
+void drmModeFreeFB(drmModeFBPtr ptr)
+{
+ if (!ptr)
+ return;
+
+ /* we might add more frees later. */
+ drmFree(ptr);
+}
+
+void drmModeFreeCrtc(drmModeCrtcPtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr);
+}
+
+void drmModeFreeConnector(drmModeConnectorPtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr->encoders);
+ drmFree(ptr->prop_values);
+ drmFree(ptr->props);
+ drmFree(ptr->modes);
+ drmFree(ptr);
+}
+
+void drmModeFreeEncoder(drmModeEncoderPtr ptr)
+{
+ drmFree(ptr);
+}
+
+/*
+ * ModeSetting functions.
+ */
+
+drmModeResPtr drmModeGetResources(int fd)
+{
+ struct drm_mode_card_res res, counts;
+ drmModeResPtr r = 0;
+
+retry:
+ memclear(res);
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
+ return 0;
+
+ counts = res;
+
+ if (res.count_fbs) {
+ res.fb_id_ptr = VOID2U64(drmMalloc(res.count_fbs*sizeof(uint32_t)));
+ if (!res.fb_id_ptr)
+ goto err_allocs;
+ }
+ if (res.count_crtcs) {
+ res.crtc_id_ptr = VOID2U64(drmMalloc(res.count_crtcs*sizeof(uint32_t)));
+ if (!res.crtc_id_ptr)
+ goto err_allocs;
+ }
+ if (res.count_connectors) {
+ res.connector_id_ptr = VOID2U64(drmMalloc(res.count_connectors*sizeof(uint32_t)));
+ if (!res.connector_id_ptr)
+ goto err_allocs;
+ }
+ if (res.count_encoders) {
+ res.encoder_id_ptr = VOID2U64(drmMalloc(res.count_encoders*sizeof(uint32_t)));
+ if (!res.encoder_id_ptr)
+ goto err_allocs;
+ }
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
+ goto err_allocs;
+
+	/* The number of available connectors etc. may have changed with a
+	 * hotplug event between the two ioctls, in which case the larger
+	 * counts are silently ignored by the kernel.
+ */
+ if (counts.count_fbs < res.count_fbs ||
+ counts.count_crtcs < res.count_crtcs ||
+ counts.count_connectors < res.count_connectors ||
+ counts.count_encoders < res.count_encoders)
+ {
+ drmFree(U642VOID(res.fb_id_ptr));
+ drmFree(U642VOID(res.crtc_id_ptr));
+ drmFree(U642VOID(res.connector_id_ptr));
+ drmFree(U642VOID(res.encoder_id_ptr));
+
+ goto retry;
+ }
+
+ /*
+ * return
+ */
+ if (!(r = drmMalloc(sizeof(*r))))
+ goto err_allocs;
+
+ r->min_width = res.min_width;
+ r->max_width = res.max_width;
+ r->min_height = res.min_height;
+ r->max_height = res.max_height;
+ r->count_fbs = res.count_fbs;
+ r->count_crtcs = res.count_crtcs;
+ r->count_connectors = res.count_connectors;
+ r->count_encoders = res.count_encoders;
+
+ r->fbs = drmAllocCpy(U642VOID(res.fb_id_ptr), res.count_fbs, sizeof(uint32_t));
+ r->crtcs = drmAllocCpy(U642VOID(res.crtc_id_ptr), res.count_crtcs, sizeof(uint32_t));
+ r->connectors = drmAllocCpy(U642VOID(res.connector_id_ptr), res.count_connectors, sizeof(uint32_t));
+ r->encoders = drmAllocCpy(U642VOID(res.encoder_id_ptr), res.count_encoders, sizeof(uint32_t));
+ if ((res.count_fbs && !r->fbs) ||
+ (res.count_crtcs && !r->crtcs) ||
+ (res.count_connectors && !r->connectors) ||
+ (res.count_encoders && !r->encoders))
+ {
+ drmFree(r->fbs);
+ drmFree(r->crtcs);
+ drmFree(r->connectors);
+ drmFree(r->encoders);
+ drmFree(r);
+ r = 0;
+ }
+
+err_allocs:
+ drmFree(U642VOID(res.fb_id_ptr));
+ drmFree(U642VOID(res.crtc_id_ptr));
+ drmFree(U642VOID(res.connector_id_ptr));
+ drmFree(U642VOID(res.encoder_id_ptr));
+
+ return r;
+}
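+
+/*
+ * Illustrative usage (a sketch, not part of the original file; fd is
+ * assumed to be an open DRM device):
+ *
+ *     drmModeResPtr res = drmModeGetResources(fd);
+ *     int i;
+ *
+ *     if (res) {
+ *         for (i = 0; i < res->count_connectors; i++) {
+ *             drmModeConnectorPtr c =
+ *                 drmModeGetConnector(fd, res->connectors[i]);
+ *             if (c) {
+ *                 (inspect c->connection, c->count_modes, ...)
+ *                 drmModeFreeConnector(c);
+ *             }
+ *         }
+ *         drmModeFreeResources(res);
+ *     }
+ */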
+
+int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
+ uint8_t bpp, uint32_t pitch, uint32_t bo_handle,
+ uint32_t *buf_id)
+{
+ struct drm_mode_fb_cmd f;
+ int ret;
+
+ memclear(f);
+ f.width = width;
+ f.height = height;
+ f.pitch = pitch;
+ f.bpp = bpp;
+ f.depth = depth;
+ f.handle = bo_handle;
+
+ if ((ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ADDFB, &f)))
+ return ret;
+
+ *buf_id = f.fb_id;
+ return 0;
+}
+
+int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height,
+ uint32_t pixel_format, uint32_t bo_handles[4],
+ uint32_t pitches[4], uint32_t offsets[4],
+ uint64_t modifier[4], uint32_t *buf_id, uint32_t flags)
+{
+ struct drm_mode_fb_cmd2 f;
+ int ret;
+
+ memclear(f);
+ f.width = width;
+ f.height = height;
+ f.pixel_format = pixel_format;
+ f.flags = flags;
+ memcpy(f.handles, bo_handles, 4 * sizeof(bo_handles[0]));
+ memcpy(f.pitches, pitches, 4 * sizeof(pitches[0]));
+ memcpy(f.offsets, offsets, 4 * sizeof(offsets[0]));
+ if (modifier)
+ memcpy(f.modifier, modifier, 4 * sizeof(modifier[0]));
+
+ if ((ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ADDFB2, &f)))
+ return ret;
+
+ *buf_id = f.fb_id;
+ return 0;
+}
+
+int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
+ uint32_t pixel_format, uint32_t bo_handles[4],
+ uint32_t pitches[4], uint32_t offsets[4],
+ uint32_t *buf_id, uint32_t flags)
+{
+ return drmModeAddFB2WithModifiers(fd, width, height,
+ pixel_format, bo_handles,
+ pitches, offsets, NULL,
+ buf_id, flags);
+}
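+
+/*
+ * Illustrative usage (a sketch, not part of the original file; bo_handle,
+ * stride, width and height are assumptions, and DRM_FORMAT_XRGB8888
+ * requires <drm_fourcc.h>):
+ *
+ *     uint32_t handles[4] = { bo_handle, 0, 0, 0 };
+ *     uint32_t pitches[4] = { stride, 0, 0, 0 };
+ *     uint32_t offsets[4] = { 0, 0, 0, 0 };
+ *     uint32_t fb_id;
+ *
+ *     if (drmModeAddFB2(fd, width, height, DRM_FORMAT_XRGB8888,
+ *                       handles, pitches, offsets, &fb_id, 0) == 0)
+ *         (fb_id can now be handed to drmModeSetCrtc() or drmModePageFlip())
+ */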
+
+int drmModeRmFB(int fd, uint32_t bufferId)
+{
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
+}
+
+drmModeFBPtr drmModeGetFB(int fd, uint32_t buf)
+{
+ struct drm_mode_fb_cmd info;
+ drmModeFBPtr r;
+
+ memclear(info);
+ info.fb_id = buf;
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETFB, &info))
+ return NULL;
+
+ if (!(r = drmMalloc(sizeof(*r))))
+ return NULL;
+
+ r->fb_id = info.fb_id;
+ r->width = info.width;
+ r->height = info.height;
+ r->pitch = info.pitch;
+ r->bpp = info.bpp;
+ r->handle = info.handle;
+ r->depth = info.depth;
+
+ return r;
+}
+
+int drmModeDirtyFB(int fd, uint32_t bufferId,
+ drmModeClipPtr clips, uint32_t num_clips)
+{
+ struct drm_mode_fb_dirty_cmd dirty;
+
+ memclear(dirty);
+ dirty.fb_id = bufferId;
+ dirty.clips_ptr = VOID2U64(clips);
+ dirty.num_clips = num_clips;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
+}
+
+/*
+ * Crtc functions
+ */
+
+drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId)
+{
+ struct drm_mode_crtc crtc;
+ drmModeCrtcPtr r;
+
+ memclear(crtc);
+ crtc.crtc_id = crtcId;
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETCRTC, &crtc))
+ return 0;
+
+ /*
+ * return
+ */
+
+ if (!(r = drmMalloc(sizeof(*r))))
+ return 0;
+
+ r->crtc_id = crtc.crtc_id;
+ r->x = crtc.x;
+ r->y = crtc.y;
+ r->mode_valid = crtc.mode_valid;
+ if (r->mode_valid) {
+ memcpy(&r->mode, &crtc.mode, sizeof(struct drm_mode_modeinfo));
+ r->width = crtc.mode.hdisplay;
+ r->height = crtc.mode.vdisplay;
+ }
+ r->buffer_id = crtc.fb_id;
+ r->gamma_size = crtc.gamma_size;
+ return r;
+}
+
+int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
+ uint32_t x, uint32_t y, uint32_t *connectors, int count,
+ drmModeModeInfoPtr mode)
+{
+ struct drm_mode_crtc crtc;
+
+ memclear(crtc);
+ crtc.x = x;
+ crtc.y = y;
+ crtc.crtc_id = crtcId;
+ crtc.fb_id = bufferId;
+ crtc.set_connectors_ptr = VOID2U64(connectors);
+ crtc.count_connectors = count;
+ if (mode) {
+ memcpy(&crtc.mode, mode, sizeof(struct drm_mode_modeinfo));
+ crtc.mode_valid = 1;
+ }
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETCRTC, &crtc);
+}
+
+/*
+ * Cursor manipulation
+ */
+
+int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height)
+{
+ struct drm_mode_cursor arg;
+
+ memclear(arg);
+ arg.flags = DRM_MODE_CURSOR_BO;
+ arg.crtc_id = crtcId;
+ arg.width = width;
+ arg.height = height;
+ arg.handle = bo_handle;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+}
+
+int drmModeSetCursor2(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y)
+{
+ struct drm_mode_cursor2 arg;
+
+ memclear(arg);
+ arg.flags = DRM_MODE_CURSOR_BO;
+ arg.crtc_id = crtcId;
+ arg.width = width;
+ arg.height = height;
+ arg.handle = bo_handle;
+ arg.hot_x = hot_x;
+ arg.hot_y = hot_y;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_CURSOR2, &arg);
+}
+
+int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y)
+{
+ struct drm_mode_cursor arg;
+
+ memclear(arg);
+ arg.flags = DRM_MODE_CURSOR_MOVE;
+ arg.crtc_id = crtcId;
+ arg.x = x;
+ arg.y = y;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+}
+
+/*
+ * Encoder functions
+ */
+drmModeEncoderPtr drmModeGetEncoder(int fd, uint32_t encoder_id)
+{
+ struct drm_mode_get_encoder enc;
+ drmModeEncoderPtr r = NULL;
+
+ memclear(enc);
+ enc.encoder_id = encoder_id;
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETENCODER, &enc))
+ return 0;
+
+ if (!(r = drmMalloc(sizeof(*r))))
+ return 0;
+
+ r->encoder_id = enc.encoder_id;
+ r->crtc_id = enc.crtc_id;
+ r->encoder_type = enc.encoder_type;
+ r->possible_crtcs = enc.possible_crtcs;
+ r->possible_clones = enc.possible_clones;
+
+ return r;
+}
+
+/*
+ * Connector manipulation
+ */
+static drmModeConnectorPtr
+_drmModeGetConnector(int fd, uint32_t connector_id, int probe)
+{
+ struct drm_mode_get_connector conn, counts;
+ drmModeConnectorPtr r = NULL;
+ struct drm_mode_modeinfo stack_mode;
+
+ memclear(conn);
+ conn.connector_id = connector_id;
+ if (!probe) {
+ conn.count_modes = 1;
+ conn.modes_ptr = VOID2U64(&stack_mode);
+ }
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
+ return 0;
+
+retry:
+ counts = conn;
+
+ if (conn.count_props) {
+ conn.props_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint32_t)));
+ if (!conn.props_ptr)
+ goto err_allocs;
+ conn.prop_values_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint64_t)));
+ if (!conn.prop_values_ptr)
+ goto err_allocs;
+ }
+
+ if (conn.count_modes) {
+ conn.modes_ptr = VOID2U64(drmMalloc(conn.count_modes*sizeof(struct drm_mode_modeinfo)));
+ if (!conn.modes_ptr)
+ goto err_allocs;
+ } else {
+ conn.count_modes = 1;
+ conn.modes_ptr = VOID2U64(&stack_mode);
+ }
+
+ if (conn.count_encoders) {
+ conn.encoders_ptr = VOID2U64(drmMalloc(conn.count_encoders*sizeof(uint32_t)));
+ if (!conn.encoders_ptr)
+ goto err_allocs;
+ }
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
+ goto err_allocs;
+
+	/* The number of available modes, props and encoders may have changed
+	 * with a hotplug event between the two ioctls, in which case the
+	 * larger counts are silently ignored by the kernel.
+ */
+ if (counts.count_props < conn.count_props ||
+ counts.count_modes < conn.count_modes ||
+ counts.count_encoders < conn.count_encoders) {
+ drmFree(U642VOID(conn.props_ptr));
+ drmFree(U642VOID(conn.prop_values_ptr));
+ if (U642VOID(conn.modes_ptr) != &stack_mode)
+ drmFree(U642VOID(conn.modes_ptr));
+ drmFree(U642VOID(conn.encoders_ptr));
+
+ goto retry;
+ }
+
+	if (!(r = drmMalloc(sizeof(*r)))) {
+ goto err_allocs;
+ }
+
+ r->connector_id = conn.connector_id;
+ r->encoder_id = conn.encoder_id;
+ r->connection = conn.connection;
+ r->mmWidth = conn.mm_width;
+ r->mmHeight = conn.mm_height;
+ /* convert subpixel from kernel to userspace */
+ r->subpixel = conn.subpixel + 1;
+ r->count_modes = conn.count_modes;
+ r->count_props = conn.count_props;
+ r->props = drmAllocCpy(U642VOID(conn.props_ptr), conn.count_props, sizeof(uint32_t));
+ r->prop_values = drmAllocCpy(U642VOID(conn.prop_values_ptr), conn.count_props, sizeof(uint64_t));
+ r->modes = drmAllocCpy(U642VOID(conn.modes_ptr), conn.count_modes, sizeof(struct drm_mode_modeinfo));
+ r->count_encoders = conn.count_encoders;
+ r->encoders = drmAllocCpy(U642VOID(conn.encoders_ptr), conn.count_encoders, sizeof(uint32_t));
+ r->connector_type = conn.connector_type;
+ r->connector_type_id = conn.connector_type_id;
+
+ if ((r->count_props && !r->props) ||
+ (r->count_props && !r->prop_values) ||
+ (r->count_modes && !r->modes) ||
+ (r->count_encoders && !r->encoders)) {
+ drmFree(r->props);
+ drmFree(r->prop_values);
+ drmFree(r->modes);
+ drmFree(r->encoders);
+ drmFree(r);
+ r = 0;
+ }
+
+err_allocs:
+ drmFree(U642VOID(conn.prop_values_ptr));
+ drmFree(U642VOID(conn.props_ptr));
+ if (U642VOID(conn.modes_ptr) != &stack_mode)
+ drmFree(U642VOID(conn.modes_ptr));
+ drmFree(U642VOID(conn.encoders_ptr));
+
+ return r;
+}
+
+drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
+{
+ return _drmModeGetConnector(fd, connector_id, 1);
+}
+
+drmModeConnectorPtr drmModeGetConnectorCurrent(int fd, uint32_t connector_id)
+{
+ return _drmModeGetConnector(fd, connector_id, 0);
+}
+
+int drmModeAttachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_info)
+{
+ struct drm_mode_mode_cmd res;
+
+ memclear(res);
+ memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
+ res.connector_id = connector_id;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
+}
+
+int drmModeDetachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_info)
+{
+ struct drm_mode_mode_cmd res;
+
+ memclear(res);
+ memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
+ res.connector_id = connector_id;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_DETACHMODE, &res);
+}
+
+drmModePropertyPtr drmModeGetProperty(int fd, uint32_t property_id)
+{
+ struct drm_mode_get_property prop;
+ drmModePropertyPtr r;
+
+ memclear(prop);
+ prop.prop_id = property_id;
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
+ return 0;
+
+ if (prop.count_values)
+ prop.values_ptr = VOID2U64(drmMalloc(prop.count_values * sizeof(uint64_t)));
+
+ if (prop.count_enum_blobs && (prop.flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+ prop.enum_blob_ptr = VOID2U64(drmMalloc(prop.count_enum_blobs * sizeof(struct drm_mode_property_enum)));
+
+ if (prop.count_enum_blobs && (prop.flags & DRM_MODE_PROP_BLOB)) {
+ prop.values_ptr = VOID2U64(drmMalloc(prop.count_enum_blobs * sizeof(uint32_t)));
+ prop.enum_blob_ptr = VOID2U64(drmMalloc(prop.count_enum_blobs * sizeof(uint32_t)));
+ }
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop)) {
+ r = NULL;
+ goto err_allocs;
+ }
+
+	if (!(r = drmMalloc(sizeof(*r))))
+		goto err_allocs;
+
+ r->prop_id = prop.prop_id;
+ r->count_values = prop.count_values;
+
+ r->flags = prop.flags;
+ if (prop.count_values)
+ r->values = drmAllocCpy(U642VOID(prop.values_ptr), prop.count_values, sizeof(uint64_t));
+ if (prop.flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ r->count_enums = prop.count_enum_blobs;
+ r->enums = drmAllocCpy(U642VOID(prop.enum_blob_ptr), prop.count_enum_blobs, sizeof(struct drm_mode_property_enum));
+ } else if (prop.flags & DRM_MODE_PROP_BLOB) {
+ r->values = drmAllocCpy(U642VOID(prop.values_ptr), prop.count_enum_blobs, sizeof(uint32_t));
+ r->blob_ids = drmAllocCpy(U642VOID(prop.enum_blob_ptr), prop.count_enum_blobs, sizeof(uint32_t));
+ r->count_blobs = prop.count_enum_blobs;
+ }
+ strncpy(r->name, prop.name, DRM_PROP_NAME_LEN);
+ r->name[DRM_PROP_NAME_LEN-1] = 0;
+
+err_allocs:
+ drmFree(U642VOID(prop.values_ptr));
+ drmFree(U642VOID(prop.enum_blob_ptr));
+
+ return r;
+}
+
+void drmModeFreeProperty(drmModePropertyPtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr->values);
+ drmFree(ptr->enums);
+ drmFree(ptr);
+}
+
+drmModePropertyBlobPtr drmModeGetPropertyBlob(int fd, uint32_t blob_id)
+{
+ struct drm_mode_get_blob blob;
+ drmModePropertyBlobPtr r;
+
+ memclear(blob);
+ blob.blob_id = blob_id;
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob))
+ return NULL;
+
+ if (blob.length)
+ blob.data = VOID2U64(drmMalloc(blob.length));
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob)) {
+ r = NULL;
+ goto err_allocs;
+ }
+
+ if (!(r = drmMalloc(sizeof(*r))))
+ goto err_allocs;
+
+ r->id = blob.blob_id;
+ r->length = blob.length;
+ r->data = drmAllocCpy(U642VOID(blob.data), 1, blob.length);
+
+err_allocs:
+ drmFree(U642VOID(blob.data));
+ return r;
+}
+
+void drmModeFreePropertyBlob(drmModePropertyBlobPtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr->data);
+ drmFree(ptr);
+}
+
+int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property_id,
+ uint64_t value)
+{
+ struct drm_mode_connector_set_property osp;
+
+ memclear(osp);
+ osp.connector_id = connector_id;
+ osp.prop_id = property_id;
+ osp.value = value;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETPROPERTY, &osp);
+}
+
+/*
+ * Checks whether a modesetting-capable driver has attached to the PCI id.
+ * Returns 0 if modesetting is supported,
+ * -EINVAL on an invalid bus id,
+ * -ENOSYS if there is no modesetting support.
+ */
+int drmCheckModesettingSupported(const char *busid)
+{
+#if defined (__linux__)
+ char pci_dev_dir[1024];
+ int domain, bus, dev, func;
+ DIR *sysdir;
+ struct dirent *dent;
+ int found = 0, ret;
+
+ ret = sscanf(busid, "pci:%04x:%02x:%02x.%d", &domain, &bus, &dev, &func);
+ if (ret != 4)
+ return -EINVAL;
+
+ sprintf(pci_dev_dir, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/drm",
+ domain, bus, dev, func);
+
+ sysdir = opendir(pci_dev_dir);
+ if (sysdir) {
+ dent = readdir(sysdir);
+ while (dent) {
+ if (!strncmp(dent->d_name, "controlD", 8)) {
+ found = 1;
+ break;
+ }
+
+ dent = readdir(sysdir);
+ }
+ closedir(sysdir);
+ if (found)
+ return 0;
+ }
+
+ sprintf(pci_dev_dir, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/",
+ domain, bus, dev, func);
+
+ sysdir = opendir(pci_dev_dir);
+ if (!sysdir)
+ return -EINVAL;
+
+ dent = readdir(sysdir);
+ while (dent) {
+ if (!strncmp(dent->d_name, "drm:controlD", 12)) {
+ found = 1;
+ break;
+ }
+
+ dent = readdir(sysdir);
+ }
+
+ closedir(sysdir);
+ if (found)
+ return 0;
+#elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
+ char kbusid[1024], sbusid[1024];
+ char oid[128];
+ int domain, bus, dev, func;
+ int i, modesetting, ret;
+ size_t len;
+
+ ret = sscanf(busid, "pci:%04x:%02x:%02x.%d", &domain, &bus, &dev,
+ &func);
+ if (ret != 4)
+ return -EINVAL;
+ snprintf(kbusid, sizeof(kbusid), "pci:%04x:%02x:%02x.%d", domain, bus,
+ dev, func);
+
+	/* How many GPUs do we expect in the machine? */
+ for (i = 0; i < 16; i++) {
+ snprintf(oid, sizeof(oid), "hw.dri.%d.busid", i);
+ len = sizeof(sbusid);
+ ret = sysctlbyname(oid, sbusid, &len, NULL, 0);
+ if (ret == -1) {
+ if (errno == ENOENT)
+ continue;
+ return -EINVAL;
+ }
+ if (strcmp(sbusid, kbusid) != 0)
+ continue;
+ snprintf(oid, sizeof(oid), "hw.dri.%d.modesetting", i);
+ len = sizeof(modesetting);
+ ret = sysctlbyname(oid, &modesetting, &len, NULL, 0);
+ if (ret == -1 || len != sizeof(modesetting))
+ return -EINVAL;
+ return (modesetting ? 0 : -ENOSYS);
+ }
+#elif defined(__DragonFly__)
+ return 0;
+#endif
+#ifdef __OpenBSD__
+ int fd;
+ struct drm_mode_card_res res;
+ drmModeResPtr r = 0;
+
+ if ((fd = drmOpen(NULL, busid)) < 0)
+ return -EINVAL;
+
+ memset(&res, 0, sizeof(struct drm_mode_card_res));
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) {
+ drmClose(fd);
+ return -errno;
+ }
+
+ drmClose(fd);
+ return 0;
+#endif
+ return -ENOSYS;
+}
+
+int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ uint16_t *red, uint16_t *green, uint16_t *blue)
+{
+ struct drm_mode_crtc_lut l;
+
+ memclear(l);
+ l.crtc_id = crtc_id;
+ l.gamma_size = size;
+ l.red = VOID2U64(red);
+ l.green = VOID2U64(green);
+ l.blue = VOID2U64(blue);
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_GETGAMMA, &l);
+}
+
+int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ uint16_t *red, uint16_t *green, uint16_t *blue)
+{
+ struct drm_mode_crtc_lut l;
+
+ memclear(l);
+ l.crtc_id = crtc_id;
+ l.gamma_size = size;
+ l.red = VOID2U64(red);
+ l.green = VOID2U64(green);
+ l.blue = VOID2U64(blue);
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETGAMMA, &l);
+}
+
+int drmHandleEvent(int fd, drmEventContextPtr evctx)
+{
+ char buffer[1024];
+ int len, i;
+ struct drm_event *e;
+ struct drm_event_vblank *vblank;
+
+	/* The DRM read semantics guarantee that we always get only
+	 * complete events. */
+
+ len = read(fd, buffer, sizeof buffer);
+ if (len == 0)
+ return 0;
+ if (len < (int)sizeof *e)
+ return -1;
+
+ i = 0;
+ while (i < len) {
+ e = (struct drm_event *) &buffer[i];
+ switch (e->type) {
+ case DRM_EVENT_VBLANK:
+ if (evctx->version < 1 ||
+ evctx->vblank_handler == NULL)
+ break;
+ vblank = (struct drm_event_vblank *) e;
+ evctx->vblank_handler(fd,
+ vblank->sequence,
+ vblank->tv_sec,
+ vblank->tv_usec,
+ U642VOID (vblank->user_data));
+ break;
+ case DRM_EVENT_FLIP_COMPLETE:
+ if (evctx->version < 2 ||
+ evctx->page_flip_handler == NULL)
+ break;
+ vblank = (struct drm_event_vblank *) e;
+ evctx->page_flip_handler(fd,
+ vblank->sequence,
+ vblank->tv_sec,
+ vblank->tv_usec,
+ U642VOID (vblank->user_data));
+ break;
+ default:
+ break;
+ }
+ i += e->length;
+ }
+
+ return 0;
+}
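+
+/*
+ * Illustrative event loop (a sketch, not part of the original file; the
+ * poll() usage and my_flip_handler are assumptions):
+ *
+ *     drmEventContext evctx;
+ *     struct pollfd pfd;
+ *
+ *     memset(&evctx, 0, sizeof(evctx));
+ *     evctx.version = DRM_EVENT_CONTEXT_VERSION;
+ *     evctx.page_flip_handler = my_flip_handler;
+ *
+ *     pfd.fd = fd;
+ *     pfd.events = POLLIN;
+ *     while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *         drmHandleEvent(fd, &evctx);
+ */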
+
+int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
+ uint32_t flags, void *user_data)
+{
+ struct drm_mode_crtc_page_flip flip;
+
+ memclear(flip);
+ flip.fb_id = fb_id;
+ flip.crtc_id = crtc_id;
+ flip.user_data = VOID2U64(user_data);
+ flip.flags = flags;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
+}
+
+int drmModeSetPlane(int fd, uint32_t plane_id, uint32_t crtc_id,
+ uint32_t fb_id, uint32_t flags,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_mode_set_plane s;
+
+ memclear(s);
+ s.plane_id = plane_id;
+ s.crtc_id = crtc_id;
+ s.fb_id = fb_id;
+ s.flags = flags;
+ s.crtc_x = crtc_x;
+ s.crtc_y = crtc_y;
+ s.crtc_w = crtc_w;
+ s.crtc_h = crtc_h;
+ s.src_x = src_x;
+ s.src_y = src_y;
+ s.src_w = src_w;
+ s.src_h = src_h;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETPLANE, &s);
+}
+
+static drmModePlanePtr get_plane(unsigned long cmd, int fd, uint32_t plane_id)
+{
+ struct drm_mode_get_plane2 ovr, counts;
+ drmModePlanePtr r = 0;
+
+retry:
+ memclear(ovr);
+ ovr.plane_id = plane_id;
+ if (drmIoctl(fd, cmd, &ovr))
+ return 0;
+
+ counts = ovr;
+
+ if (ovr.count_format_types) {
+ ovr.format_type_ptr = VOID2U64(drmMalloc(ovr.count_format_types *
+ sizeof(uint32_t)));
+ if (!ovr.format_type_ptr)
+ goto err_allocs;
+ }
+
+ if (ovr.count_format_modifiers) {
+ ovr.format_modifier_ptr =
+ VOID2U64(drmMalloc(ovr.count_format_modifiers *
+ sizeof(struct drm_format_modifier)));
+ if (!ovr.format_modifier_ptr)
+ goto err_allocs;
+ }
+
+ if (drmIoctl(fd, cmd, &ovr))
+ goto err_allocs;
+
+ if (counts.count_format_types < ovr.count_format_types ||
+ counts.count_format_modifiers < ovr.count_format_modifiers) {
+ drmFree(U642VOID(ovr.format_type_ptr));
+ drmFree(U642VOID(ovr.format_modifier_ptr));
+ goto retry;
+ }
+
+ if (!(r = drmMalloc(sizeof(*r))))
+ goto err_allocs;
+
+ r->count_formats = ovr.count_format_types;
+ r->count_format_modifiers = ovr.count_format_modifiers;
+ r->plane_id = ovr.plane_id;
+ r->crtc_id = ovr.crtc_id;
+ r->fb_id = ovr.fb_id;
+ r->possible_crtcs = ovr.possible_crtcs;
+ r->gamma_size = ovr.gamma_size;
+ r->formats = drmAllocCpy(U642VOID(ovr.format_type_ptr),
+ ovr.count_format_types, sizeof(uint32_t));
+ if (ovr.count_format_types && !r->formats) {
+ drmFree(r->formats);
+ drmFree(r);
+ r = 0;
+ goto err_allocs;
+ }
+
+ r->format_modifiers =
+ drmAllocCpy(U642VOID(ovr.format_modifier_ptr),
+ ovr.count_format_modifiers,
+ sizeof(struct drm_format_modifier));
+ if (ovr.count_format_modifiers && !r->format_modifiers) {
+ drmFree(r->formats);
+ drmFree(r);
+ r = 0;
+ }
+
+err_allocs:
+ drmFree(U642VOID(ovr.format_type_ptr));
+ drmFree(U642VOID(ovr.format_modifier_ptr));
+
+ return r;
+}
+
+drmModePlanePtr drmModeGetPlane2(int fd, uint32_t plane_id)
+{
+ drmModePlanePtr r = get_plane(DRM_IOCTL_MODE_GETPLANE2, fd, plane_id);
+
+ if (r || errno != EINVAL)
+ return r;
+
+ return get_plane(DRM_IOCTL_MODE_GETPLANE, fd, plane_id);
+}
+
+drmModePlanePtr drmModeGetPlane(int fd, uint32_t plane_id)
+{
+ return get_plane(DRM_IOCTL_MODE_GETPLANE, fd, plane_id);
+}
+
+void drmModeFreePlane(drmModePlanePtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr->formats);
+ drmFree(ptr->format_modifiers);
+ drmFree(ptr);
+}
+
+drmModePlaneResPtr drmModeGetPlaneResources(int fd)
+{
+ struct drm_mode_get_plane_res res, counts;
+ drmModePlaneResPtr r = 0;
+
+retry:
+ memclear(res);
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res))
+ return 0;
+
+ counts = res;
+
+ if (res.count_planes) {
+ res.plane_id_ptr = VOID2U64(drmMalloc(res.count_planes *
+ sizeof(uint32_t)));
+ if (!res.plane_id_ptr)
+ goto err_allocs;
+ }
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res))
+ goto err_allocs;
+
+ if (counts.count_planes < res.count_planes) {
+ drmFree(U642VOID(res.plane_id_ptr));
+ goto retry;
+ }
+
+ if (!(r = drmMalloc(sizeof(*r))))
+ goto err_allocs;
+
+ r->count_planes = res.count_planes;
+ r->planes = drmAllocCpy(U642VOID(res.plane_id_ptr),
+ res.count_planes, sizeof(uint32_t));
+ if (res.count_planes && !r->planes) {
+ drmFree(r->planes);
+ drmFree(r);
+ r = 0;
+ }
+
+err_allocs:
+ drmFree(U642VOID(res.plane_id_ptr));
+
+ return r;
+}
+
+void drmModeFreePlaneResources(drmModePlaneResPtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr->planes);
+ drmFree(ptr);
+}
+
+drmModeObjectPropertiesPtr drmModeObjectGetProperties(int fd,
+ uint32_t object_id,
+ uint32_t object_type)
+{
+ struct drm_mode_obj_get_properties properties;
+ drmModeObjectPropertiesPtr ret = NULL;
+ uint32_t count;
+
+retry:
+ memclear(properties);
+ properties.obj_id = object_id;
+ properties.obj_type = object_type;
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &properties))
+ return 0;
+
+ count = properties.count_props;
+
+ if (count) {
+ properties.props_ptr = VOID2U64(drmMalloc(count *
+ sizeof(uint32_t)));
+ if (!properties.props_ptr)
+ goto err_allocs;
+ properties.prop_values_ptr = VOID2U64(drmMalloc(count *
+ sizeof(uint64_t)));
+ if (!properties.prop_values_ptr)
+ goto err_allocs;
+ }
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &properties))
+ goto err_allocs;
+
+ if (count < properties.count_props) {
+ drmFree(U642VOID(properties.props_ptr));
+ drmFree(U642VOID(properties.prop_values_ptr));
+ goto retry;
+ }
+ count = properties.count_props;
+
+ ret = drmMalloc(sizeof(*ret));
+ if (!ret)
+ goto err_allocs;
+
+ ret->count_props = count;
+ ret->props = drmAllocCpy(U642VOID(properties.props_ptr),
+ count, sizeof(uint32_t));
+ ret->prop_values = drmAllocCpy(U642VOID(properties.prop_values_ptr),
+ count, sizeof(uint64_t));
+ if (ret->count_props && (!ret->props || !ret->prop_values)) {
+ drmFree(ret->props);
+ drmFree(ret->prop_values);
+ drmFree(ret);
+ ret = NULL;
+ }
+
+err_allocs:
+ drmFree(U642VOID(properties.props_ptr));
+ drmFree(U642VOID(properties.prop_values_ptr));
+ return ret;
+}
+
+void drmModeFreeObjectProperties(drmModeObjectPropertiesPtr ptr)
+{
+ if (!ptr)
+ return;
+ drmFree(ptr->props);
+ drmFree(ptr->prop_values);
+ drmFree(ptr);
+}
+
+int drmModeObjectSetProperty(int fd, uint32_t object_id, uint32_t object_type,
+ uint32_t property_id, uint64_t value)
+{
+ struct drm_mode_obj_set_property prop;
+
+ memclear(prop);
+ prop.value = value;
+ prop.prop_id = property_id;
+ prop.obj_id = object_id;
+ prop.obj_type = object_type;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, &prop);
+}
+
+typedef struct _drmModeAtomicReqItem drmModeAtomicReqItem, *drmModeAtomicReqItemPtr;
+
+struct _drmModeAtomicReqItem {
+ uint32_t object_id;
+ uint32_t property_id;
+ uint64_t value;
+};
+
+struct _drmModeAtomicReq {
+ uint32_t cursor;
+ uint32_t size_items;
+ drmModeAtomicReqItemPtr items;
+};
+
+drmModeAtomicReqPtr drmModeAtomicAlloc(void)
+{
+ drmModeAtomicReqPtr req;
+
+ req = drmMalloc(sizeof *req);
+ if (!req)
+ return NULL;
+
+ req->items = NULL;
+ req->cursor = 0;
+ req->size_items = 0;
+
+ return req;
+}
+
+drmModeAtomicReqPtr drmModeAtomicDuplicate(drmModeAtomicReqPtr old)
+{
+ drmModeAtomicReqPtr new;
+
+ if (!old)
+ return NULL;
+
+ new = drmMalloc(sizeof *new);
+ if (!new)
+ return NULL;
+
+ new->cursor = old->cursor;
+ new->size_items = old->size_items;
+
+ if (old->size_items) {
+ new->items = drmMalloc(old->size_items * sizeof(*new->items));
+ if (!new->items) {
+ free(new);
+ return NULL;
+ }
+ memcpy(new->items, old->items,
+ old->size_items * sizeof(*new->items));
+ } else {
+ new->items = NULL;
+ }
+
+ return new;
+}
+
+int drmModeAtomicMerge(drmModeAtomicReqPtr base, drmModeAtomicReqPtr augment)
+{
+ if (!base)
+ return -EINVAL;
+
+ if (!augment || augment->cursor == 0)
+ return 0;
+
+ if (base->cursor + augment->cursor >= base->size_items) {
+ drmModeAtomicReqItemPtr new;
+ int saved_size = base->size_items;
+
+ base->size_items = base->cursor + augment->cursor;
+ new = realloc(base->items,
+ base->size_items * sizeof(*base->items));
+ if (!new) {
+ base->size_items = saved_size;
+ return -ENOMEM;
+ }
+ base->items = new;
+ }
+
+ memcpy(&base->items[base->cursor], augment->items,
+ augment->cursor * sizeof(*augment->items));
+ base->cursor += augment->cursor;
+
+ return 0;
+}
+
+int drmModeAtomicGetCursor(drmModeAtomicReqPtr req)
+{
+ if (!req)
+ return -EINVAL;
+ return req->cursor;
+}
+
+void drmModeAtomicSetCursor(drmModeAtomicReqPtr req, int cursor)
+{
+ if (req)
+ req->cursor = cursor;
+}
+
+int drmModeAtomicAddProperty(drmModeAtomicReqPtr req,
+ uint32_t object_id,
+ uint32_t property_id,
+ uint64_t value)
+{
+ if (!req)
+ return -EINVAL;
+
+ if (req->cursor >= req->size_items) {
+ drmModeAtomicReqItemPtr new;
+
+ req->size_items += 16;
+ new = realloc(req->items, req->size_items * sizeof(*req->items));
+ if (!new) {
+ req->size_items -= 16;
+ return -ENOMEM;
+ }
+ req->items = new;
+ }
+
+ req->items[req->cursor].object_id = object_id;
+ req->items[req->cursor].property_id = property_id;
+ req->items[req->cursor].value = value;
+ req->cursor++;
+
+ return req->cursor;
+}
+
+void drmModeAtomicFree(drmModeAtomicReqPtr req)
+{
+ if (!req)
+ return;
+
+ if (req->items)
+ drmFree(req->items);
+ drmFree(req);
+}
+
+static int sort_req_list(const void *misc, const void *other)
+{
+ const drmModeAtomicReqItem *first = misc;
+ const drmModeAtomicReqItem *second = other;
+
+ if (first->object_id < second->object_id)
+ return -1;
+ else if (first->object_id > second->object_id)
+ return 1;
+ else
+ return second->property_id - first->property_id;
+}
+
+int drmModeAtomicCommit(int fd, drmModeAtomicReqPtr req, uint32_t flags,
+ void *user_data)
+{
+ drmModeAtomicReqPtr sorted;
+ struct drm_mode_atomic atomic;
+ uint32_t *objs_ptr = NULL;
+ uint32_t *count_props_ptr = NULL;
+ uint32_t *props_ptr = NULL;
+ uint64_t *prop_values_ptr = NULL;
+ uint32_t last_obj_id = 0;
+ uint32_t i;
+ int obj_idx = -1;
+ int ret = -1;
+
+ if (!req)
+ return -EINVAL;
+
+ if (req->cursor == 0)
+ return 0;
+
+ sorted = drmModeAtomicDuplicate(req);
+ if (sorted == NULL)
+ return -ENOMEM;
+
+ memclear(atomic);
+
+ /* Sort the list by object ID, then by property ID. */
+ qsort(sorted->items, sorted->cursor, sizeof(*sorted->items),
+ sort_req_list);
+
+ /* Now that the list is sorted, eliminate duplicate property sets. */
+ for (i = 0; i < sorted->cursor; i++) {
+ if (sorted->items[i].object_id != last_obj_id) {
+ atomic.count_objs++;
+ last_obj_id = sorted->items[i].object_id;
+ }
+
+ if (i == sorted->cursor - 1)
+ continue;
+
+ if (sorted->items[i].object_id != sorted->items[i + 1].object_id ||
+ sorted->items[i].property_id != sorted->items[i + 1].property_id)
+ continue;
+
+ memmove(&sorted->items[i], &sorted->items[i + 1],
+ (sorted->cursor - i - 1) * sizeof(*sorted->items));
+ sorted->cursor--;
+ /* Re-check the entry that slid into slot i, so that runs of
+ * three or more duplicates are fully collapsed. */
+ i--;
+ }
+
+ objs_ptr = drmMalloc(atomic.count_objs * sizeof objs_ptr[0]);
+ if (!objs_ptr) {
+ errno = ENOMEM;
+ goto out;
+ }
+
+ count_props_ptr = drmMalloc(atomic.count_objs * sizeof count_props_ptr[0]);
+ if (!count_props_ptr) {
+ errno = ENOMEM;
+ goto out;
+ }
+
+ props_ptr = drmMalloc(sorted->cursor * sizeof props_ptr[0]);
+ if (!props_ptr) {
+ errno = ENOMEM;
+ goto out;
+ }
+
+ prop_values_ptr = drmMalloc(sorted->cursor * sizeof prop_values_ptr[0]);
+ if (!prop_values_ptr) {
+ errno = ENOMEM;
+ goto out;
+ }
+
+ for (i = 0, last_obj_id = 0; i < sorted->cursor; i++) {
+ if (sorted->items[i].object_id != last_obj_id) {
+ obj_idx++;
+ objs_ptr[obj_idx] = sorted->items[i].object_id;
+ last_obj_id = objs_ptr[obj_idx];
+ }
+
+ count_props_ptr[obj_idx]++;
+ props_ptr[i] = sorted->items[i].property_id;
+ prop_values_ptr[i] = sorted->items[i].value;
+ }
+
+ atomic.flags = flags;
+ atomic.objs_ptr = VOID2U64(objs_ptr);
+ atomic.count_props_ptr = VOID2U64(count_props_ptr);
+ atomic.props_ptr = VOID2U64(props_ptr);
+ atomic.prop_values_ptr = VOID2U64(prop_values_ptr);
+ atomic.user_data = VOID2U64(user_data);
+
+ ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ATOMIC, &atomic);
+
+out:
+ drmFree(objs_ptr);
+ drmFree(count_props_ptr);
+ drmFree(props_ptr);
+ drmFree(prop_values_ptr);
+ drmModeAtomicFree(sorted);
+
+ return ret;
+}
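
A usage sketch for the atomic request machinery above, assuming an atomic-capable kernel and that the plane's "CRTC_X"/"CRTC_Y" property IDs were looked up beforehand; error handling is abbreviated:

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Sketch: move a plane in one atomic commit. Duplicate AddProperty
     * calls are legal; the commit path sorts and de-duplicates them. */
    static int move_plane(int fd, uint32_t plane_id,
                          uint32_t prop_crtc_x, uint32_t prop_crtc_y,
                          int32_t x, int32_t y)
    {
        drmModeAtomicReqPtr req;
        int ret;

        /* Opt in to the atomic API (kernels without it reject this). */
        if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1))
            return -EINVAL;

        req = drmModeAtomicAlloc();
        if (!req)
            return -ENOMEM;

        drmModeAtomicAddProperty(req, plane_id, prop_crtc_x, (uint64_t)x);
        drmModeAtomicAddProperty(req, plane_id, prop_crtc_y, (uint64_t)y);

        /* Validate the state first, then apply it for real. */
        ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
        if (ret == 0)
            ret = drmModeAtomicCommit(fd, req, 0, NULL);

        drmModeAtomicFree(req);
        return ret;
    }
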
+
+int
+drmModeCreatePropertyBlob(int fd, const void *data, size_t length, uint32_t *id)
+{
+ struct drm_mode_create_blob create;
+ int ret;
+
+ if (length >= 0xffffffff)
+ return -ERANGE;
+
+ memclear(create);
+
+ create.length = length;
+ create.data = (uintptr_t) data;
+ create.blob_id = 0;
+ *id = 0;
+
+ ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &create);
+ if (ret != 0)
+ return ret;
+
+ *id = create.blob_id;
+ return 0;
+}
+
+int
+drmModeDestroyPropertyBlob(int fd, uint32_t id)
+{
+ struct drm_mode_destroy_blob destroy;
+
+ memclear(destroy);
+ destroy.blob_id = id;
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_DESTROYPROPBLOB, &destroy);
+}
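
Blobs are how variable-sized state reaches the kernel in the atomic world; the canonical example is wrapping a drmModeModeInfo for a CRTC's "MODE_ID" property. A sketch of the create/use lifecycle, with prop_mode_id assumed to have been looked up separately:

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Sketch: wrap a mode in a blob and stage it on a CRTC. The blob can
     * be dropped with drmModeDestroyPropertyBlob() once no committed
     * state references it any more. */
    static int stage_mode(int fd, drmModeAtomicReqPtr req, uint32_t crtc_id,
                          uint32_t prop_mode_id, const drmModeModeInfo *mode,
                          uint32_t *blob_id)
    {
        int ret = drmModeCreatePropertyBlob(fd, mode, sizeof(*mode), blob_id);
        if (ret)
            return ret;

        /* AddProperty returns the new cursor (> 0) on success. */
        ret = drmModeAtomicAddProperty(req, crtc_id, prop_mode_id, *blob_id);
        return ret < 0 ? ret : 0;
    }
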
diff --git a/chromium/third_party/libdrm/src/xf86drmMode.h b/chromium/third_party/libdrm/src/xf86drmMode.h
new file mode 100644
index 00000000000..3ecc2eb4e68
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86drmMode.h
@@ -0,0 +1,526 @@
+/*
+ * \file xf86drmMode.h
+ * Header for DRM modesetting interface.
+ *
+ * \author Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * \par Acknowledgements:
+ * Feb 2007, Dave Airlie <airlied@linux.ie>
+ */
+
+/*
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2007-2008 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _XF86DRMMODE_H_
+#define _XF86DRMMODE_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <drm.h>
+
+/*
+ * This is the interface for modesetting for drm.
+ *
+ * In order to use this interface you must include either <stdint.h> or another
+ * header defining uint32_t, int32_t and uint16_t.
+ *
+ * It aims to provide a randr1.2 compatible interface for modesetting in the
+ * kernel; the interface is also meant to be used by libraries like EGL.
+ *
+ * More information can be found in randrproto.txt, which is available here:
+ * http://gitweb.freedesktop.org/?p=xorg/proto/randrproto.git
+ *
+ * There are some major differences to be noted. Unlike the randr1.2 protocol,
+ * you need to create the framebuffer's memory object yourself, using the ttm
+ * buffer object interface. This object needs to be pinned.
+ */
+
+/*
+ * If we pick up an old version of drm.h which doesn't include drm_mode.h,
+ * we redefine the defines here, so that builds don't break with a new
+ * libdrm on old kernels.
+ */
+#ifndef _DRM_MODE_H
+
+#define DRM_DISPLAY_INFO_LEN 32
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN (1<<0)
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_PREFERRED (1<<3)
+#define DRM_MODE_TYPE_DEFAULT (1<<4)
+#define DRM_MODE_TYPE_USERDEF (1<<5)
+#define DRM_MODE_TYPE_DRIVER (1<<6)
+
+/* Video mode flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_FLAG_PHSYNC (1<<0)
+#define DRM_MODE_FLAG_NHSYNC (1<<1)
+#define DRM_MODE_FLAG_PVSYNC (1<<2)
+#define DRM_MODE_FLAG_NVSYNC (1<<3)
+#define DRM_MODE_FLAG_INTERLACE (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN (1<<5)
+#define DRM_MODE_FLAG_CSYNC (1<<6)
+#define DRM_MODE_FLAG_PCSYNC (1<<7)
+#define DRM_MODE_FLAG_NCSYNC (1<<8)
+#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST (1<<10)
+#define DRM_MODE_FLAG_PIXMUX (1<<11)
+#define DRM_MODE_FLAG_DBLCLK (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
+#define DRM_MODE_FLAG_3D_NONE (0<<14)
+#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
+#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
+#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
+#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+
+/* DPMS flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_DPMS_ON 0
+#define DRM_MODE_DPMS_STANDBY 1
+#define DRM_MODE_DPMS_SUSPEND 2
+#define DRM_MODE_DPMS_OFF 3
+
+/* Scaling mode options */
+#define DRM_MODE_SCALE_NON_GPU 0
+#define DRM_MODE_SCALE_FULLSCREEN 1
+#define DRM_MODE_SCALE_NO_SCALE 2
+#define DRM_MODE_SCALE_ASPECT 3
+
+/* Dithering mode options */
+#define DRM_MODE_DITHERING_OFF 0
+#define DRM_MODE_DITHERING_ON 1
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI 6
+
+#define DRM_MODE_SUBCONNECTOR_Automatic 0
+#define DRM_MODE_SUBCONNECTOR_Unknown 0
+#define DRM_MODE_SUBCONNECTOR_DVID 3
+#define DRM_MODE_SUBCONNECTOR_DVIA 4
+#define DRM_MODE_SUBCONNECTOR_Composite 5
+#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
+#define DRM_MODE_SUBCONNECTOR_Component 8
+#define DRM_MODE_SUBCONNECTOR_SCART 9
+
+#define DRM_MODE_CONNECTOR_Unknown 0
+#define DRM_MODE_CONNECTOR_VGA 1
+#define DRM_MODE_CONNECTOR_DVII 2
+#define DRM_MODE_CONNECTOR_DVID 3
+#define DRM_MODE_CONNECTOR_DVIA 4
+#define DRM_MODE_CONNECTOR_Composite 5
+#define DRM_MODE_CONNECTOR_SVIDEO 6
+#define DRM_MODE_CONNECTOR_LVDS 7
+#define DRM_MODE_CONNECTOR_Component 8
+#define DRM_MODE_CONNECTOR_9PinDIN 9
+#define DRM_MODE_CONNECTOR_DisplayPort 10
+#define DRM_MODE_CONNECTOR_HDMIA 11
+#define DRM_MODE_CONNECTOR_HDMIB 12
+#define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
+#define DRM_MODE_CONNECTOR_VIRTUAL 15
+#define DRM_MODE_CONNECTOR_DSI 16
+
+#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
+#define DRM_MODE_PROP_BLOB (1<<4)
+
+#define DRM_MODE_CURSOR_BO (1<<0)
+#define DRM_MODE_CURSOR_MOVE (1<<1)
+
+#endif /* _DRM_MODE_H */
+
+
+/*
+ * Feature defines
+ *
+ * Just because these are defined doesn't mean that the kernel
+ * can do that feature; it's just for new code vs old libdrm.
+ */
+#define DRM_MODE_FEATURE_KMS 1
+#define DRM_MODE_FEATURE_DIRTYFB 1
+
+
+typedef struct _drmModeRes {
+
+ int count_fbs;
+ uint32_t *fbs;
+
+ int count_crtcs;
+ uint32_t *crtcs;
+
+ int count_connectors;
+ uint32_t *connectors;
+
+ int count_encoders;
+ uint32_t *encoders;
+
+ uint32_t min_width, max_width;
+ uint32_t min_height, max_height;
+} drmModeRes, *drmModeResPtr;
+
+typedef struct _drmModeModeInfo {
+ uint32_t clock;
+ uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
+ uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+ uint32_t vrefresh;
+
+ uint32_t flags;
+ uint32_t type;
+ char name[DRM_DISPLAY_MODE_LEN];
+} drmModeModeInfo, *drmModeModeInfoPtr;
+
+typedef struct _drmModeFB {
+ uint32_t fb_id;
+ uint32_t width, height;
+ uint32_t pitch;
+ uint32_t bpp;
+ uint32_t depth;
+ /* driver specific handle */
+ uint32_t handle;
+} drmModeFB, *drmModeFBPtr;
+
+typedef struct drm_clip_rect drmModeClip, *drmModeClipPtr;
+
+typedef struct _drmModePropertyBlob {
+ uint32_t id;
+ uint32_t length;
+ void *data;
+} drmModePropertyBlobRes, *drmModePropertyBlobPtr;
+
+typedef struct _drmModeProperty {
+ uint32_t prop_id;
+ uint32_t flags;
+ char name[DRM_PROP_NAME_LEN];
+ int count_values;
+ uint64_t *values; /* store the blob lengths */
+ int count_enums;
+ struct drm_mode_property_enum *enums;
+ int count_blobs;
+ uint32_t *blob_ids; /* store the blob IDs */
+} drmModePropertyRes, *drmModePropertyPtr;
+
+static __inline int drm_property_type_is(drmModePropertyPtr property,
+ uint32_t type)
+{
+ /* instanceof for props.. handles extended type vs original types: */
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
+ return property->flags & type;
+}
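
A sketch of how this helper is meant to be used, walking one object's properties and classifying them (fd, obj_id and obj_type are whatever the caller is inspecting, e.g. a plane ID with DRM_MODE_OBJECT_PLANE from drm_mode.h):

    #include <inttypes.h>
    #include <stdio.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Sketch: print every range-typed property on a KMS object. Range
     * properties carry their min/max in values[0]/values[1]. */
    static void dump_range_props(int fd, uint32_t obj_id, uint32_t obj_type)
    {
        drmModeObjectPropertiesPtr props =
                drmModeObjectGetProperties(fd, obj_id, obj_type);
        uint32_t i;

        if (!props)
            return;
        for (i = 0; i < props->count_props; i++) {
            drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);
            if (!prop)
                continue;
            if (drm_property_type_is(prop, DRM_MODE_PROP_RANGE))
                printf("%s: range [%" PRIu64 ", %" PRIu64 "]\n", prop->name,
                       prop->values[0], prop->values[1]);
            drmModeFreeProperty(prop);
        }
        drmModeFreeObjectProperties(props);
    }
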
+
+typedef struct _drmModeCrtc {
+ uint32_t crtc_id;
+ uint32_t buffer_id; /**< FB id to connect to, 0 = disconnect */
+
+ uint32_t x, y; /**< Position on the framebuffer */
+ uint32_t width, height;
+ int mode_valid;
+ drmModeModeInfo mode;
+
+ int gamma_size; /**< Number of gamma stops */
+
+} drmModeCrtc, *drmModeCrtcPtr;
+
+typedef struct _drmModeEncoder {
+ uint32_t encoder_id;
+ uint32_t encoder_type;
+ uint32_t crtc_id;
+ uint32_t possible_crtcs;
+ uint32_t possible_clones;
+} drmModeEncoder, *drmModeEncoderPtr;
+
+typedef enum {
+ DRM_MODE_CONNECTED = 1,
+ DRM_MODE_DISCONNECTED = 2,
+ DRM_MODE_UNKNOWNCONNECTION = 3
+} drmModeConnection;
+
+typedef enum {
+ DRM_MODE_SUBPIXEL_UNKNOWN = 1,
+ DRM_MODE_SUBPIXEL_HORIZONTAL_RGB = 2,
+ DRM_MODE_SUBPIXEL_HORIZONTAL_BGR = 3,
+ DRM_MODE_SUBPIXEL_VERTICAL_RGB = 4,
+ DRM_MODE_SUBPIXEL_VERTICAL_BGR = 5,
+ DRM_MODE_SUBPIXEL_NONE = 6
+} drmModeSubPixel;
+
+typedef struct _drmModeConnector {
+ uint32_t connector_id;
+ uint32_t encoder_id; /**< Encoder currently connected to */
+ uint32_t connector_type;
+ uint32_t connector_type_id;
+ drmModeConnection connection;
+ uint32_t mmWidth, mmHeight; /**< WxH in millimeters */
+ drmModeSubPixel subpixel;
+
+ int count_modes;
+ drmModeModeInfoPtr modes;
+
+ int count_props;
+ uint32_t *props; /**< List of property ids */
+ uint64_t *prop_values; /**< List of property values */
+
+ int count_encoders;
+ uint32_t *encoders; /**< List of encoder ids */
+} drmModeConnector, *drmModeConnectorPtr;
+
+#define DRM_PLANE_TYPE_OVERLAY 0
+#define DRM_PLANE_TYPE_PRIMARY 1
+#define DRM_PLANE_TYPE_CURSOR 2
+
+typedef struct _drmModeObjectProperties {
+ uint32_t count_props;
+ uint32_t *props;
+ uint64_t *prop_values;
+} drmModeObjectProperties, *drmModeObjectPropertiesPtr;
+
+typedef struct _drmModePlane {
+ uint32_t count_formats;
+ uint32_t *formats;
+ uint32_t plane_id;
+
+ uint32_t crtc_id;
+ uint32_t fb_id;
+
+ uint32_t crtc_x, crtc_y;
+ uint32_t x, y;
+
+ uint32_t possible_crtcs;
+ uint32_t gamma_size;
+
+ uint32_t count_format_modifiers;
+ struct drm_format_modifier *format_modifiers;
+} drmModePlane, *drmModePlanePtr;
+
+typedef struct _drmModePlaneRes {
+ uint32_t count_planes;
+ uint32_t *planes;
+} drmModePlaneRes, *drmModePlaneResPtr;
+
+extern void drmModeFreeModeInfo( drmModeModeInfoPtr ptr );
+extern void drmModeFreeResources( drmModeResPtr ptr );
+extern void drmModeFreeFB( drmModeFBPtr ptr );
+extern void drmModeFreeCrtc( drmModeCrtcPtr ptr );
+extern void drmModeFreeConnector( drmModeConnectorPtr ptr );
+extern void drmModeFreeEncoder( drmModeEncoderPtr ptr );
+extern void drmModeFreePlane( drmModePlanePtr ptr );
+extern void drmModeFreePlaneResources(drmModePlaneResPtr ptr);
+
+/**
+ * Retrieves all of the resources associated with a card.
+ */
+extern drmModeResPtr drmModeGetResources(int fd);
+
+/*
+ * FrameBuffer manipulation.
+ */
+
+/**
+ * Retrieve information about framebuffer bufferId
+ */
+extern drmModeFBPtr drmModeGetFB(int fd, uint32_t bufferId);
+
+/**
+ * Creates a new framebuffer with a buffer object as its scanout buffer.
+ */
+extern int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
+ uint8_t bpp, uint32_t pitch, uint32_t bo_handle,
+ uint32_t *buf_id);
+/* ...with a specific pixel format */
+extern int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
+ uint32_t pixel_format, uint32_t bo_handles[4],
+ uint32_t pitches[4], uint32_t offsets[4],
+ uint32_t *buf_id, uint32_t flags);
+
+/* ...with format modifiers */
+int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height,
+ uint32_t pixel_format, uint32_t bo_handles[4],
+ uint32_t pitches[4], uint32_t offsets[4],
+ uint64_t modifier[4], uint32_t *buf_id, uint32_t flags);
+
+/**
+ * Destroys the given framebuffer.
+ */
+extern int drmModeRmFB(int fd, uint32_t bufferId);
+
+/**
+ * Mark a region of a framebuffer as dirty.
+ */
+extern int drmModeDirtyFB(int fd, uint32_t bufferId,
+ drmModeClipPtr clips, uint32_t num_clips);
+
+
+/*
+ * Crtc functions
+ */
+
+/**
+ * Retrieve information about the crtc crtcId
+ */
+extern drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId);
+
+/**
+ * Set the mode on crtc crtcId with the given mode.
+ */
+int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
+ uint32_t x, uint32_t y, uint32_t *connectors, int count,
+ drmModeModeInfoPtr mode);
+
+/*
+ * Cursor functions
+ */
+
+/**
+ * Set the cursor on crtc
+ */
+int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height);
+
+int drmModeSetCursor2(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y);
+/**
+ * Move the cursor on crtc
+ */
+int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y);
+
+/*
+ * Encoder functions
+ */
+drmModeEncoderPtr drmModeGetEncoder(int fd, uint32_t encoder_id);
+
+/*
+ * Connector manipulation
+ */
+
+/**
+ * Retrieve all information about the connector connectorId. This will do a
+ * forced probe on the connector to retrieve remote information such as EDIDs
+ * from the display device.
+ */
+extern drmModeConnectorPtr drmModeGetConnector(int fd,
+ uint32_t connectorId);
+
+/**
+ * Retrieve current information, i.e. the currently active mode and encoder,
+ * about the connector connectorId. This will not do any probing on the
+ * connector or remote device, and only reports what is currently known.
+ * For the complete set of modes and encoders associated with the connector
+ * use drmModeGetConnector() which will do a probe to determine any display
+ * link changes first.
+ */
+extern drmModeConnectorPtr drmModeGetConnectorCurrent(int fd,
+ uint32_t connector_id);
+
+/**
+ * Attaches the given mode to a connector.
+ */
+extern int drmModeAttachMode(int fd, uint32_t connectorId, drmModeModeInfoPtr mode_info);
+
+/**
+ * Detaches a mode from the connector. The given mode must currently
+ * be unused.
+ */
+extern int drmModeDetachMode(int fd, uint32_t connectorId, drmModeModeInfoPtr mode_info);
+
+extern drmModePropertyPtr drmModeGetProperty(int fd, uint32_t propertyId);
+extern void drmModeFreeProperty(drmModePropertyPtr ptr);
+
+extern drmModePropertyBlobPtr drmModeGetPropertyBlob(int fd, uint32_t blob_id);
+extern void drmModeFreePropertyBlob(drmModePropertyBlobPtr ptr);
+extern int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property_id,
+ uint64_t value);
+extern int drmCheckModesettingSupported(const char *busid);
+
+extern int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ uint16_t *red, uint16_t *green, uint16_t *blue);
+extern int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ uint16_t *red, uint16_t *green, uint16_t *blue);
+extern int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
+ uint32_t flags, void *user_data);
+
+extern drmModePlaneResPtr drmModeGetPlaneResources(int fd);
+extern drmModePlanePtr drmModeGetPlane(int fd, uint32_t plane_id);
+extern drmModePlanePtr drmModeGetPlane2(int fd, uint32_t plane_id);
+extern int drmModeSetPlane(int fd, uint32_t plane_id, uint32_t crtc_id,
+ uint32_t fb_id, uint32_t flags,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+
+extern drmModeObjectPropertiesPtr drmModeObjectGetProperties(int fd,
+ uint32_t object_id,
+ uint32_t object_type);
+extern void drmModeFreeObjectProperties(drmModeObjectPropertiesPtr ptr);
+extern int drmModeObjectSetProperty(int fd, uint32_t object_id,
+ uint32_t object_type, uint32_t property_id,
+ uint64_t value);
+
+
+typedef struct _drmModeAtomicReq drmModeAtomicReq, *drmModeAtomicReqPtr;
+
+extern drmModeAtomicReqPtr drmModeAtomicAlloc(void);
+extern drmModeAtomicReqPtr drmModeAtomicDuplicate(drmModeAtomicReqPtr req);
+extern int drmModeAtomicMerge(drmModeAtomicReqPtr base,
+ drmModeAtomicReqPtr augment);
+extern void drmModeAtomicFree(drmModeAtomicReqPtr req);
+extern int drmModeAtomicGetCursor(drmModeAtomicReqPtr req);
+extern void drmModeAtomicSetCursor(drmModeAtomicReqPtr req, int cursor);
+extern int drmModeAtomicAddProperty(drmModeAtomicReqPtr req,
+ uint32_t object_id,
+ uint32_t property_id,
+ uint64_t value);
+extern int drmModeAtomicCommit(int fd,
+ drmModeAtomicReqPtr req,
+ uint32_t flags,
+ void *user_data);
+
+extern int drmModeCreatePropertyBlob(int fd, const void *data, size_t size,
+ uint32_t *id);
+extern int drmModeDestroyPropertyBlob(int fd, uint32_t id);
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
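
Tying the declarations above together, a compressed sketch of the classic (pre-atomic) modeset flow: probe resources, find a connected connector, and point its current CRTC at a framebuffer. fb_id is assumed to have been created with drmModeAddFB()/drmModeAddFB2(), and the connector is assumed to already have an encoder bound (encoder_id != 0):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Sketch: light up the first connected connector with its first
     * (usually preferred) mode. Error handling is abbreviated. */
    static int modeset_first_connector(int fd, uint32_t fb_id)
    {
        drmModeResPtr res = drmModeGetResources(fd);
        int i, ret = -1;

        if (!res)
            return -1;

        for (i = 0; i < res->count_connectors; i++) {
            drmModeConnectorPtr conn =
                    drmModeGetConnector(fd, res->connectors[i]);
            if (!conn)
                continue;
            if (conn->connection == DRM_MODE_CONNECTED && conn->count_modes) {
                drmModeEncoderPtr enc =
                        drmModeGetEncoder(fd, conn->encoder_id);
                if (enc) {
                    ret = drmModeSetCrtc(fd, enc->crtc_id, fb_id, 0, 0,
                                         &conn->connector_id, 1,
                                         &conn->modes[0]);
                    drmModeFreeEncoder(enc);
                }
                drmModeFreeConnector(conn);
                break;
            }
            drmModeFreeConnector(conn);
        }

        drmModeFreeResources(res);
        return ret;
    }
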
diff --git a/chromium/third_party/libdrm/src/xf86drmRandom.c b/chromium/third_party/libdrm/src/xf86drmRandom.c
new file mode 100644
index 00000000000..81f03014fa5
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86drmRandom.c
@@ -0,0 +1,137 @@
+/* xf86drmRandom.c -- "Minimal Standard" PRNG Implementation
+ * Created: Mon Apr 19 08:28:13 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * DESCRIPTION
+ *
+ * This file contains a simple, straightforward implementation of the Park
+ * & Miller "Minimal Standard" PRNG [PM88, PMS93], which is a Lehmer
+ * multiplicative linear congruential generator (MLCG) with a period of
+ * 2^31-1.
+ *
+ * This implementation is intended to provide a reliable, portable PRNG
+ * that is suitable for testing a hash table implementation and for
+ * implementing skip lists.
+ *
+ * FUTURE ENHANCEMENTS
+ *
+ * If initial seeds are not selected randomly, two instances of the PRNG
+ * can be correlated. [Knuth81, pp. 32-33] describes a shuffling technique
+ * that can eliminate this problem.
+ *
+ * If PRNGs are used for simulation, the period of the current
+ * implementation may be too short. [LE88] discusses methods of combining
+ * MLCGs to produce much longer periods, and suggests some alternative
+ * values for A and M. [LE90 and Sch92] also provide information on
+ * long-period PRNGs.
+ *
+ * REFERENCES
+ *
+ * [Knuth81] Donald E. Knuth. The Art of Computer Programming. Volume 2:
+ * Seminumerical Algorithms. Reading, Massachusetts: Addison-Wesley, 1981.
+ *
+ * [LE88] Pierre L'Ecuyer. "Efficient and Portable Combined Random Number
+ * Generators". CACM 31(6), June 1988, pp. 742-774.
+ *
+ * [LE90] Pierre L'Ecuyer. "Random Numbers for Simulation". CACM 33(10),
+ * October 1990, pp. 85-97.
+ *
+ * [PM88] Stephen K. Park and Keith W. Miller. "Random Number Generators:
+ * Good Ones are Hard to Find". CACM 31(10), October 1988, pp. 1192-1201.
+ *
+ * [Sch92] Bruce Schneier. "Pseudo-Random Sequence Generator for 32-Bit
+ * CPUs". Dr. Dobb's Journal 17(2), February 1992, pp. 34, 37-38, 40.
+ *
+ * [PMS93] Stephen K. Park, Keith W. Miller, and Paul K. Stockmeyer. In
+ * "Technical Correspondence: Remarks on Choosing and Implementing Random
+ * Number Generators". CACM 36(7), July 1993, pp. 105-110.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "xf86drm.h"
+#include "xf86drmRandom.h"
+
+#define RANDOM_MAGIC 0xfeedbeef
+
+void *drmRandomCreate(unsigned long seed)
+{
+ RandomState *state;
+
+ state = drmMalloc(sizeof(*state));
+ if (!state) return NULL;
+ state->magic = RANDOM_MAGIC;
+#if 0
+ /* Park & Miller, October 1988 */
+ state->a = 16807;
+ state->m = 2147483647;
+ state->check = 1043618065; /* After 10000 iterations */
+#else
+ /* Park, Miller, and Stockmeyer, July 1993 */
+ state->a = 48271;
+ state->m = 2147483647;
+ state->check = 399268537; /* After 10000 iterations */
+#endif
+ state->q = state->m / state->a;
+ state->r = state->m % state->a;
+
+ state->seed = seed;
+ /* Check for illegal boundary conditions,
+ and choose closest legal value. */
+ if (state->seed <= 0) state->seed = 1;
+ if (state->seed >= state->m) state->seed = state->m - 1;
+
+ return state;
+}
+
+int drmRandomDestroy(void *state)
+{
+ drmFree(state);
+ return 0;
+}
+
+unsigned long drmRandom(void *state)
+{
+ RandomState *s = (RandomState *)state;
+ unsigned long hi;
+ unsigned long lo;
+
+ hi = s->seed / s->q;
+ lo = s->seed % s->q;
+ s->seed = s->a * lo - s->r * hi;
+ if ((s->a * lo) <= (s->r * hi)) s->seed += s->m;
+
+ return s->seed;
+}
+
+double drmRandomDouble(void *state)
+{
+ RandomState *s = (RandomState *)state;
+
+ return (double)drmRandom(state)/(double)s->m;
+}
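
The check member makes the generator self-verifying: starting from seed 1, the 10000th value produced must equal the stored constant (399268537 for the Park/Miller/Stockmeyer parameters compiled in above). A sketch of that sanity check:

    #include <stdio.h>
    #include <xf86drm.h>

    /* Sketch: exercise the PRNG and compare against the published
     * check value after 10000 iterations from seed 1. */
    int main(void)
    {
        void *state = drmRandomCreate(1);
        unsigned long value = 0;
        int i;

        if (!state)
            return 1;
        for (i = 0; i < 10000; i++)
            value = drmRandom(state);
        printf("10000th value: %lu (%s)\n", value,
               value == 399268537UL ? "ok" : "MISMATCH");
        drmRandomDestroy(state);
        return value == 399268537UL ? 0 : 1;
    }
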
diff --git a/chromium/third_party/libdrm/src/xf86drmRandom.h b/chromium/third_party/libdrm/src/xf86drmRandom.h
new file mode 100644
index 00000000000..43b730ce01f
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86drmRandom.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+typedef struct RandomState {
+ unsigned long magic;
+ unsigned long a;
+ unsigned long m;
+ unsigned long q; /* m div a */
+ unsigned long r; /* m mod a */
+ unsigned long check;
+ unsigned long seed;
+} RandomState;
diff --git a/chromium/third_party/libdrm/src/xf86drmSL.c b/chromium/third_party/libdrm/src/xf86drmSL.c
new file mode 100644
index 00000000000..a12fa1d0063
--- /dev/null
+++ b/chromium/third_party/libdrm/src/xf86drmSL.c
@@ -0,0 +1,322 @@
+/* xf86drmSL.c -- Skip list support
+ * Created: Mon May 10 09:28:13 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * DESCRIPTION
+ *
+ * This file contains a straightforward skip list implementation.
+ *
+ * FUTURE ENHANCEMENTS
+ *
+ * REFERENCES
+ *
+ * [Pugh90] William Pugh. Skip Lists: A Probabilistic Alternative to
+ * Balanced Trees. CACM 33(6), June 1990, pp. 668-676.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "xf86drm.h"
+
+#define SL_LIST_MAGIC 0xfacade00LU
+#define SL_ENTRY_MAGIC 0x00fab1edLU
+#define SL_FREED_MAGIC 0xdecea5edLU
+#define SL_MAX_LEVEL 16
+#define SL_RANDOM_SEED 0xc01055a1LU
+
+#define SL_RANDOM_DECL static void *state = NULL
+#define SL_RANDOM_INIT(seed) if (!state) state = drmRandomCreate(seed)
+#define SL_RANDOM drmRandom(state)
+
+typedef struct SLEntry {
+ unsigned long magic; /* SL_ENTRY_MAGIC */
+ unsigned long key;
+ void *value;
+ int levels;
+ struct SLEntry *forward[1]; /* variable sized array */
+} SLEntry, *SLEntryPtr;
+
+typedef struct SkipList {
+ unsigned long magic; /* SL_LIST_MAGIC */
+ int level;
+ int count;
+ SLEntryPtr head;
+ SLEntryPtr p0; /* Position for iteration */
+} SkipList, *SkipListPtr;
+
+static SLEntryPtr SLCreateEntry(int max_level, unsigned long key, void *value)
+{
+ SLEntryPtr entry;
+
+ if (max_level < 0 || max_level > SL_MAX_LEVEL) max_level = SL_MAX_LEVEL;
+
+ entry = drmMalloc(sizeof(*entry)
+ + (max_level + 1) * sizeof(entry->forward[0]));
+ if (!entry) return NULL;
+ entry->magic = SL_ENTRY_MAGIC;
+ entry->key = key;
+ entry->value = value;
+ entry->levels = max_level + 1;
+
+ return entry;
+}
+
+static int SLRandomLevel(void)
+{
+ int level = 1;
+ SL_RANDOM_DECL;
+
+ SL_RANDOM_INIT(SL_RANDOM_SEED);
+
+ while ((SL_RANDOM & 0x01) && level < SL_MAX_LEVEL) ++level;
+ return level;
+}
+
+void *drmSLCreate(void)
+{
+ SkipListPtr list;
+ int i;
+
+ list = drmMalloc(sizeof(*list));
+ if (!list) return NULL;
+ list->magic = SL_LIST_MAGIC;
+ list->level = 0;
+ list->head = SLCreateEntry(SL_MAX_LEVEL, 0, NULL);
+ if (!list->head) {
+ drmFree(list);
+ return NULL;
+ }
+ list->count = 0;
+
+ for (i = 0; i <= SL_MAX_LEVEL; i++) list->head->forward[i] = NULL;
+
+ return list;
+}
+
+int drmSLDestroy(void *l)
+{
+ SkipListPtr list = (SkipListPtr)l;
+ SLEntryPtr entry;
+ SLEntryPtr next;
+
+ if (list->magic != SL_LIST_MAGIC) return -1; /* Bad magic */
+
+ for (entry = list->head; entry; entry = next) {
+ if (entry->magic != SL_ENTRY_MAGIC) return -1; /* Bad magic */
+ next = entry->forward[0];
+ entry->magic = SL_FREED_MAGIC;
+ drmFree(entry);
+ }
+
+ list->magic = SL_FREED_MAGIC;
+ drmFree(list);
+ return 0;
+}
+
+static SLEntryPtr SLLocate(void *l, unsigned long key, SLEntryPtr *update)
+{
+ SkipListPtr list = (SkipListPtr)l;
+ SLEntryPtr entry;
+ int i;
+
+ if (list->magic != SL_LIST_MAGIC) return NULL;
+
+ for (i = list->level, entry = list->head; i >= 0; i--) {
+ while (entry->forward[i] && entry->forward[i]->key < key)
+ entry = entry->forward[i];
+ update[i] = entry;
+ }
+
+ return entry->forward[0];
+}
+
+int drmSLInsert(void *l, unsigned long key, void *value)
+{
+ SkipListPtr list = (SkipListPtr)l;
+ SLEntryPtr entry;
+ SLEntryPtr update[SL_MAX_LEVEL + 1];
+ int level;
+ int i;
+
+ if (list->magic != SL_LIST_MAGIC) return -1; /* Bad magic */
+
+ entry = SLLocate(list, key, update);
+
+ if (entry && entry->key == key) return 1; /* Already in list */
+
+ level = SLRandomLevel();
+ if (level > list->level) {
+ level = ++list->level;
+ update[level] = list->head;
+ }
+
+ entry = SLCreateEntry(level, key, value);
+ if (!entry) return -1; /* Allocation failure */
+
+ /* Fix up forward pointers */
+ for (i = 0; i <= level; i++) {
+ entry->forward[i] = update[i]->forward[i];
+ update[i]->forward[i] = entry;
+ }
+
+ ++list->count;
+ return 0; /* Added to table */
+}
+
+int drmSLDelete(void *l, unsigned long key)
+{
+ SkipListPtr list = (SkipListPtr)l;
+ SLEntryPtr update[SL_MAX_LEVEL + 1];
+ SLEntryPtr entry;
+ int i;
+
+ if (list->magic != SL_LIST_MAGIC) return -1; /* Bad magic */
+
+ entry = SLLocate(list, key, update);
+
+ if (!entry || entry->key != key) return 1; /* Not found */
+
+ /* Fix up forward pointers */
+ for (i = 0; i <= list->level; i++) {
+ if (update[i]->forward[i] == entry)
+ update[i]->forward[i] = entry->forward[i];
+ }
+
+ entry->magic = SL_FREED_MAGIC;
+ drmFree(entry);
+
+ while (list->level && !list->head->forward[list->level]) --list->level;
+ --list->count;
+ return 0;
+}
+
+int drmSLLookup(void *l, unsigned long key, void **value)
+{
+ SkipListPtr list = (SkipListPtr)l;
+ SLEntryPtr update[SL_MAX_LEVEL + 1];
+ SLEntryPtr entry;
+
+ entry = SLLocate(list, key, update);
+
+ if (entry && entry->key == key) {
+ *value = entry->value; /* Return the stored value, not the node */
+ return 0;
+ }
+ *value = NULL;
+ return -1;
+}
+
+int drmSLLookupNeighbors(void *l, unsigned long key,
+ unsigned long *prev_key, void **prev_value,
+ unsigned long *next_key, void **next_value)
+{
+ SkipListPtr list = (SkipListPtr)l;
+ SLEntryPtr update[SL_MAX_LEVEL + 1] = {0};
+ int retcode = 0;
+
+ SLLocate(list, key, update);
+
+ *prev_key = *next_key = key;
+ *prev_value = *next_value = NULL;
+
+ if (update[0]) {
+ *prev_key = update[0]->key;
+ *prev_value = update[0]->value;
+ ++retcode;
+ if (update[0]->forward[0]) {
+ *next_key = update[0]->forward[0]->key;
+ *next_value = update[0]->forward[0]->value;
+ ++retcode;
+ }
+ }
+ return retcode;
+}
+
+int drmSLNext(void *l, unsigned long *key, void **value)
+{
+ SkipListPtr list = (SkipListPtr)l;
+ SLEntryPtr entry;
+
+ if (list->magic != SL_LIST_MAGIC) return -1; /* Bad magic */
+
+ entry = list->p0;
+
+ if (entry) {
+ list->p0 = entry->forward[0];
+ *key = entry->key;
+ *value = entry->value;
+ return 1;
+ }
+ list->p0 = NULL;
+ return 0;
+}
+
+int drmSLFirst(void *l, unsigned long *key, void **value)
+{
+ SkipListPtr list = (SkipListPtr)l;
+
+ if (list->magic != SL_LIST_MAGIC) return -1; /* Bad magic */
+
+ list->p0 = list->head->forward[0];
+ return drmSLNext(list, key, value);
+}
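
A lifecycle sketch for the skip list API above; drmSLFirst()/drmSLNext() visit keys in ascending order:

    #include <stdio.h>
    #include <xf86drm.h>

    /* Sketch: insert a few key/value pairs, walk them in key order,
     * then tear the list down. */
    int main(void)
    {
        void *list = drmSLCreate();
        unsigned long key;
        void *value;

        if (!list)
            return 1;
        drmSLInsert(list, 30, (void *)"thirty");
        drmSLInsert(list, 10, (void *)"ten");
        drmSLInsert(list, 20, (void *)"twenty");

        /* Prints 10, 20, 30 in that order. */
        if (drmSLFirst(list, &key, &value) == 1) {
            do {
                printf("%lu -> %s\n", key, (const char *)value);
            } while (drmSLNext(list, &key, &value) == 1);
        }

        drmSLDestroy(list);
        return 0;
    }
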
+
+/* Dump internal data structures for debugging. */
+void drmSLDump(void *l)
+{
+ SkipListPtr list = (SkipListPtr)l;
+ SLEntryPtr entry;
+ int i;
+
+ if (list->magic != SL_LIST_MAGIC) {
+ printf("Bad magic: 0x%08lx (expected 0x%08lx)\n",
+ list->magic, SL_LIST_MAGIC);
+ return;
+ }
+
+ printf("Level = %d, count = %d\n", list->level, list->count);
+ for (entry = list->head; entry; entry = entry->forward[0]) {
+ if (entry->magic != SL_ENTRY_MAGIC) {
+ printf("Bad magic: 0x%08lx (expected 0x%08lx)\n",
+ entry->magic, SL_ENTRY_MAGIC);
+ }
+ printf("\nEntry %p <0x%08lx, %p> has %2d levels\n",
+ entry, entry->key, entry->value, entry->levels);
+ for (i = 0; i < entry->levels; i++) {
+ if (entry->forward[i]) {
+ printf(" %2d: %p <0x%08lx, %p>\n",
+ i,
+ entry->forward[i],
+ entry->forward[i]->key,
+ entry->forward[i]->value);
+ } else {
+ printf(" %2d: %p\n", i, entry->forward[i]);
+ }
+ }
+ }
+}