path: root/chromium/third_party/dawn/src
author     Allan Sandfeld Jensen <allan.jensen@qt.io>    2021-09-03 13:32:17 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>    2021-10-01 14:31:55 +0200
commit     21ba0c5d4bf8fba15dddd97cd693bad2358b77fd (patch)
tree       91be119f694044dfc1ff9fdc054459e925de9df0 /chromium/third_party/dawn/src
parent     03c549e0392f92c02536d3f86d5e1d8dfa3435ac (diff)
download   qtwebengine-chromium-21ba0c5d4bf8fba15dddd97cd693bad2358b77fd.tar.gz
BASELINE: Update Chromium to 92.0.4515.166
Change-Id: I42a050486714e9e54fc271f2a8939223a02ae364
Diffstat (limited to 'chromium/third_party/dawn/src')
-rw-r--r--  chromium/third_party/dawn/src/common/BUILD.gn | 24
-rw-r--r--  chromium/third_party/dawn/src/common/CMakeLists.txt | 3
-rw-r--r--  chromium/third_party/dawn/src/common/DynamicLib.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/common/GPUInfo.cpp | 35
-rw-r--r--  chromium/third_party/dawn/src/common/GPUInfo.h | 12
-rw-r--r--  chromium/third_party/dawn/src/common/PlacementAllocated.h | 5
-rw-r--r--  chromium/third_party/dawn/src/common/Platform.h | 8
-rw-r--r--  chromium/third_party/dawn/src/common/Preprocessor.h | 71
-rw-r--r--  chromium/third_party/dawn/src/common/SlabAllocator.cpp | 17
-rw-r--r--  chromium/third_party/dawn/src/common/SlabAllocator.h | 6
-rw-r--r--  chromium/third_party/dawn/src/common/VertexFormatUtils.h | 3
-rw-r--r--  chromium/third_party/dawn/src/common/WindowsUtils.cpp | 13
-rw-r--r--  chromium/third_party/dawn/src/common/WindowsUtils.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BUILD.gn | 28
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Buffer.cpp | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Buffer.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CMakeLists.txt | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.cpp | 37
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.h | 47
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp | 202
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp | 39
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Commands.cpp | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Commands.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp | 49
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CompilationMessages.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp | 59
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp | 142
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp (renamed from chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.cpp) | 71
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h | 68
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.h | 92
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.cpp | 143
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.h | 75
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp | 54
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/EncodingContext.h | 33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Format.cpp | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Instance.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h | 58
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp | 158
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h | 53
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Pipeline.cpp | 23
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Pipeline.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp | 184
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h | 29
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp | 45
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/QueryHelper.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Queue.cpp | 36
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundle.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp | 34
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h | 14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp | 15
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp | 66
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Sampler.cpp | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp | 204
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.h | 24
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Surface.cpp | 182
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Surface.h | 21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.cpp | 47
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ToBackend.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp | 12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp | 36
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp | 268
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp | 50
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp | 42
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp | 45
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp | 65
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp | 294
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h | 56
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp | 34
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h | 18
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp | 90
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp | 80
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h | 13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm | 279
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm | 45
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm | 21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm | 28
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm | 72
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp | 333
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp | 139
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp | 106
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp | 21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp | 65
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp | 230
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp | 13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp | 339
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp | 39
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp | 109
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h | 12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_platform/WorkerThread.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/WireClient.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/WireServer.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp | 32
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Buffer.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Client.cpp | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Client.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/Server.cpp | 32
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/Server.h | 6
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h | 2
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_wire/WireClient.h | 10
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_wire/WireServer.h | 10
-rw-r--r--  chromium/third_party/dawn/src/tests/BUILD.gn | 17
-rw-r--r--  chromium/third_party/dawn/src/utils/BUILD.gn | 2
-rw-r--r--  chromium/third_party/dawn/src/utils/BackendBinding.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/utils/TestUtils.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/utils/WGPUHelpers.cpp | 12
-rw-r--r--  chromium/third_party/dawn/src/utils/WGPUHelpers.h | 5
142 files changed, 3662 insertions, 2318 deletions
diff --git a/chromium/third_party/dawn/src/common/BUILD.gn b/chromium/third_party/dawn/src/common/BUILD.gn
index 176e82ea414..dd76c2a83eb 100644
--- a/chromium/third_party/dawn/src/common/BUILD.gn
+++ b/chromium/third_party/dawn/src/common/BUILD.gn
@@ -71,6 +71,12 @@ config("dawn_internal") {
if (dawn_enable_opengl) {
defines += [ "DAWN_ENABLE_BACKEND_OPENGL" ]
}
+ if (dawn_enable_desktop_gl) {
+ defines += [ "DAWN_ENABLE_BACKEND_DESKTOP_GL" ]
+ }
+ if (dawn_enable_opengles) {
+ defines += [ "DAWN_ENABLE_BACKEND_OPENGLES" ]
+ }
if (dawn_enable_vulkan) {
defines += [ "DAWN_ENABLE_BACKEND_VULKAN" ]
}
@@ -89,8 +95,11 @@ config("dawn_internal") {
cflags = []
- # Enable more warnings that were found when using Dawn in other projects
- if (is_clang) {
+ # Enable more warnings that were found when using Dawn in other projects.
+ # Add them only when building in standalone because we control which clang
+ # version we use. Otherwise we risk breaking projects depending on Dawn when
+ # they use a different clang version.
+ if (dawn_standalone && is_clang) {
cflags += [
"-Wconditional-uninitialized",
"-Wcstring-format-directive",
@@ -129,6 +138,16 @@ config("dawn_internal") {
# Dawn extends wgpu enums with internal enums.
# MSVC considers these invalid switch values. crbug.com/dawn/397.
cflags += [ "/wd4063" ]
+ if (dawn_is_winuwp) {
+ # /ZW makes sure we don't add calls that are forbidden in UWP, and /EHsc is
+ # required in combination with it. Even though /EHsc is already added by the
+ # Windows GN defaults, we add it again so that every /ZW is paired with an
+ # /EHsc.
+ cflags_cc = [
+ "/ZW:nostdlib",
+ "/EHsc",
+ ]
+ }
}
}
@@ -164,6 +183,7 @@ if (is_win || is_linux || is_chromeos || is_mac || is_fuchsia || is_android) {
"NonCopyable.h",
"PlacementAllocated.h",
"Platform.h",
+ "Preprocessor.h",
"RefBase.h",
"RefCounted.cpp",
"RefCounted.h",
diff --git a/chromium/third_party/dawn/src/common/CMakeLists.txt b/chromium/third_party/dawn/src/common/CMakeLists.txt
index a49e6827f8b..c73410e69a9 100644
--- a/chromium/third_party/dawn/src/common/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/common/CMakeLists.txt
@@ -36,6 +36,7 @@ target_sources(dawn_common PRIVATE
"NonCopyable.h"
"PlacementAllocated.h"
"Platform.h"
+ "Preprocessor.h"
"RefBase.h"
"RefCounted.cpp"
"RefCounted.h"
@@ -72,7 +73,7 @@ if (WIN32)
)
endif()
-target_link_libraries(dawn_common PRIVATE dawn_internal_config)
+target_link_libraries(dawn_common PUBLIC dawncpp_headers PRIVATE dawn_internal_config)
# TODO Android Log support
# TODO Vulkan headers support
diff --git a/chromium/third_party/dawn/src/common/DynamicLib.cpp b/chromium/third_party/dawn/src/common/DynamicLib.cpp
index 6de7ced7f34..119ec42f4ac 100644
--- a/chromium/third_party/dawn/src/common/DynamicLib.cpp
+++ b/chromium/third_party/dawn/src/common/DynamicLib.cpp
@@ -18,6 +18,9 @@
#if DAWN_PLATFORM_WINDOWS
# include "common/windows_with_undefs.h"
+# if DAWN_PLATFORM_WINUWP
+# include "common/WindowsUtils.h"
+# endif
#elif DAWN_PLATFORM_POSIX
# include <dlfcn.h>
#else
@@ -43,8 +46,11 @@ bool DynamicLib::Valid() const {
bool DynamicLib::Open(const std::string& filename, std::string* error) {
#if DAWN_PLATFORM_WINDOWS
+# if DAWN_PLATFORM_WINUWP
+ mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0);
+# else
mHandle = LoadLibraryA(filename.c_str());
-
+# endif
if (mHandle == nullptr && error != nullptr) {
*error = "Windows Error: " + std::to_string(GetLastError());
}
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.cpp b/chromium/third_party/dawn/src/common/GPUInfo.cpp
index 65ef78eec32..5aa44ee82d4 100644
--- a/chromium/third_party/dawn/src/common/GPUInfo.cpp
+++ b/chromium/third_party/dawn/src/common/GPUInfo.cpp
@@ -14,6 +14,8 @@
#include "common/GPUInfo.h"
+#include "common/Assert.h"
+
#include <algorithm>
#include <array>
@@ -38,6 +40,13 @@ namespace gpu_info {
const std::array<uint32_t, 21> Cometlake = {
{0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
+
+ bool IsOldIntelD3DVersionScheme(const D3DDriverVersion& driverVersion) {
+ // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html
+ // for more details
+ return driverVersion[2] < 100u;
+ }
+
} // anonymous namespace
bool IsAMD(PCIVendorID vendorId) {
@@ -65,6 +74,32 @@ namespace gpu_info {
return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
}
+ int CompareD3DDriverVersion(PCIVendorID vendorId,
+ const D3DDriverVersion& version1,
+ const D3DDriverVersion& version2) {
+ if (IsIntel(vendorId)) {
+ // The Intel graphics driver version schema has had a change since the Windows 10 April
+ // 2018 Update release. In the new schema the 3rd number of the driver version is always
+ // 100, while on the older drivers it is always less than 100. For the drivers using the
+ // same driver version schema, the newer driver always has the bigger 4th number.
+ // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html
+ // for more details.
+ bool isOldIntelDriver1 = IsOldIntelD3DVersionScheme(version1);
+ bool isOldIntelDriver2 = IsOldIntelD3DVersionScheme(version2);
+ if (isOldIntelDriver1 && !isOldIntelDriver2) {
+ return -1;
+ } else if (!isOldIntelDriver1 && isOldIntelDriver2) {
+ return 1;
+ } else {
+ return static_cast<int32_t>(version1[3]) - static_cast<int32_t>(version2[3]);
+ }
+ }
+
+ // TODO(jiawei.shao@intel.com): support other GPU vendors
+ UNREACHABLE();
+ return 0;
+ }
+
// Intel GPUs
bool IsSkylake(PCIDeviceID deviceId) {
return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.h b/chromium/third_party/dawn/src/common/GPUInfo.h
index 09980a7e183..eb696442271 100644
--- a/chromium/third_party/dawn/src/common/GPUInfo.h
+++ b/chromium/third_party/dawn/src/common/GPUInfo.h
@@ -15,6 +15,7 @@
#ifndef COMMON_GPUINFO_H
#define COMMON_GPUINFO_H
+#include <array>
#include <cstdint>
using PCIVendorID = uint32_t;
@@ -43,6 +44,17 @@ namespace gpu_info {
bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
+ using D3DDriverVersion = std::array<uint16_t, 4>;
+
+ // Compare two driver versions. Currently we only support comparing Intel D3D
+ // driver versions.
+ // - Return a negative value if version1 is older
+ // - Return a positive value if version1 is newer
+ // - Return 0 if version1 and version2 represent the same driver version
+ int CompareD3DDriverVersion(PCIVendorID vendorId,
+ const D3DDriverVersion& version1,
+ const D3DDriverVersion& version2);
+
// Intel architectures
bool IsSkylake(PCIDeviceID deviceId);
bool IsKabylake(PCIDeviceID deviceId);
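
For illustration, a minimal usage sketch of the new CompareD3DDriverVersion helper; 0x8086 is the standard Intel PCI vendor ID, and the version numbers in the comment are made up for the example:

    #include "common/GPUInfo.h"

    bool IsNewerIntelD3DDriver(const gpu_info::D3DDriverVersion& candidate,
                               const gpu_info::D3DDriverVersion& baseline) {
        // 0x8086 is the Intel PCI vendor ID; the helper only supports Intel today
        // and hits UNREACHABLE() for other vendors.
        return gpu_info::CompareD3DDriverVersion(0x8086, candidate, baseline) > 0;
    }

    // Example: a driver on the new schema (3rd field == 100) always compares newer
    // than one on the old schema (3rd field < 100), e.g.
    //   IsNewerIntelD3DDriver({27, 20, 100, 9466}, {24, 20, 16, 4944}) == true
    // (both version numbers are illustrative).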
diff --git a/chromium/third_party/dawn/src/common/PlacementAllocated.h b/chromium/third_party/dawn/src/common/PlacementAllocated.h
index 6bb329c3d5c..6c715ca66a6 100644
--- a/chromium/third_party/dawn/src/common/PlacementAllocated.h
+++ b/chromium/third_party/dawn/src/common/PlacementAllocated.h
@@ -32,6 +32,11 @@ class PlacementAllocated {
void operator delete(void* ptr) {
// Object is placement-allocated. Don't free the memory.
}
+
+ void operator delete(void*, void*) {
+ // This is added to match new(size_t size, void* ptr).
+ // Otherwise it triggers warning C4291 in MSVC.
+ }
};
#endif // COMMON_PLACEMENTALLOCATED_H_
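
A short sketch of the placement-allocation pattern the class supports; the Node type and the caller-provided buffer are hypothetical, only the operator new/delete pairing mirrors PlacementAllocated:

    #include "common/PlacementAllocated.h"

    struct Node : public PlacementAllocated {
        explicit Node(int v) : value(v) {
        }
        int value;
    };

    void Example(char* storage) {
        // 'storage' is assumed to be suitably sized and aligned for Node.
        // Placement new uses PlacementAllocated's operator new(size_t, void*).
        // The matching operator delete(void*, void*) added above is what the
        // compiler would call if Node's constructor threw; without it MSVC
        // emits warning C4291.
        Node* node = new (storage) Node(42);

        // Deleting through the class's operator delete(void*) runs the
        // destructor but deliberately does not free 'storage'.
        delete node;
    }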
diff --git a/chromium/third_party/dawn/src/common/Platform.h b/chromium/third_party/dawn/src/common/Platform.h
index af7b1751518..f9471021fd1 100644
--- a/chromium/third_party/dawn/src/common/Platform.h
+++ b/chromium/third_party/dawn/src/common/Platform.h
@@ -16,7 +16,15 @@
#define COMMON_PLATFORM_H_
#if defined(_WIN32) || defined(_WIN64)
+# include <winapifamily.h>
# define DAWN_PLATFORM_WINDOWS 1
+# if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
+# define DAWN_PLATFORM_WIN32 1
+# elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
+# define DAWN_PLATFORM_WINUWP 1
+# else
+# error "Unsupported Windows platform."
+# endif
#elif defined(__linux__)
# define DAWN_PLATFORM_LINUX 1
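
A small sketch of how the new Windows-family macros are meant to be consumed; the function is illustrative, and the real consumer added by this patch is DynamicLib.cpp:

    #include "common/Platform.h"

    const char* DescribeWindowsFlavor() {
    #if defined(DAWN_PLATFORM_WINUWP)
        return "UWP app (WINAPI_FAMILY_PC_APP)";
    #elif defined(DAWN_PLATFORM_WIN32)
        return "Win32 desktop app";
    #else
        return "not a Windows build";
    #endif
    }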
diff --git a/chromium/third_party/dawn/src/common/Preprocessor.h b/chromium/third_party/dawn/src/common/Preprocessor.h
new file mode 100644
index 00000000000..b49fc3ab247
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/Preprocessor.h
@@ -0,0 +1,71 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_PREPROCESSOR_H_
+#define COMMON_PREPROCESSOR_H_
+
+// DAWN_PP_GET_HEAD: get the first element of a __VA_ARGS__ without triggering empty
+// __VA_ARGS__ warnings.
+#define DAWN_INTERNAL_PP_GET_HEAD(firstParam, ...) firstParam
+#define DAWN_PP_GET_HEAD(...) DAWN_INTERNAL_PP_GET_HEAD(__VA_ARGS__, dummyArg)
+
+// DAWN_PP_CONCATENATE: Concatenate tokens, first expanding the arguments passed in.
+#define DAWN_PP_CONCATENATE(arg1, arg2) DAWN_PP_CONCATENATE_1(arg1, arg2)
+#define DAWN_PP_CONCATENATE_1(arg1, arg2) DAWN_PP_CONCATENATE_2(arg1, arg2)
+#define DAWN_PP_CONCATENATE_2(arg1, arg2) arg1##arg2
+
+// DAWN_PP_EXPAND: Needed to help expand __VA_ARGS__ out on MSVC
+#define DAWN_PP_EXPAND(...) __VA_ARGS__
+
+// Implementation of DAWN_PP_FOR_EACH, called by concatenating DAWN_PP_FOR_EACH_ with a number.
+#define DAWN_PP_FOR_EACH_1(func, x) func(x)
+#define DAWN_PP_FOR_EACH_2(func, x, ...) \
+ func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_1)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_3(func, x, ...) \
+ func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_2)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_4(func, x, ...) \
+ func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_3)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_5(func, x, ...) \
+ func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_4)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_6(func, x, ...) \
+ func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_5)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_7(func, x, ...) \
+ func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_6)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_8(func, x, ...) \
+ func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_7)(func, __VA_ARGS__))
+
+// Implementation for DAWN_PP_FOR_EACH. Get the number of args in __VA_ARGS__ so we can concat
+// DAWN_PP_FOR_EACH_ and N.
+// ex.) DAWN_PP_FOR_EACH_NARG(a, b, c) ->
+// DAWN_PP_FOR_EACH_NARG(a, b, c, DAWN_PP_FOR_EACH_RSEQ()) ->
+// DAWN_PP_FOR_EACH_NARG_(a, b, c, 8, 7, 6, 5, 4, 3, 2, 1, 0) ->
+// DAWN_PP_FOR_EACH_ARG_N(a, b, c, 8, 7, 6, 5, 4, 3, 2, 1, 0) ->
+// DAWN_PP_FOR_EACH_ARG_N( , , , , , , , , N) ->
+// 3
+#define DAWN_PP_FOR_EACH_NARG(...) DAWN_PP_FOR_EACH_NARG_(__VA_ARGS__, DAWN_PP_FOR_EACH_RSEQ())
+#define DAWN_PP_FOR_EACH_NARG_(...) \
+ DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_ARG_N)(__VA_ARGS__))
+#define DAWN_PP_FOR_EACH_ARG_N(_1, _2, _3, _4, _5, _6, _7, _8, N, ...) N
+#define DAWN_PP_FOR_EACH_RSEQ() 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+// Implementation for DAWN_PP_FOR_EACH.
+// Creates a call to DAWN_PP_FOR_EACH_X where X is 1, 2, ..., etc.
+#define DAWN_PP_FOR_EACH_(N, func, x, ...) \
+ DAWN_PP_CONCATENATE(DAWN_PP_FOR_EACH_, N)(func, x, __VA_ARGS__)
+
+// DAWN_PP_FOR_EACH: Apply |func| to each argument in |x| and __VA_ARGS__
+#define DAWN_PP_FOR_EACH(func, x, ...) \
+ DAWN_PP_FOR_EACH_(DAWN_PP_FOR_EACH_NARG(x, __VA_ARGS__), func, x, __VA_ARGS__)
+
+#endif // COMMON_PREPROCESSOR_H_
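
A worked expansion of DAWN_PP_FOR_EACH; the DECLARE_MEMBER macro is hypothetical and exists only to show how the per-argument macro is applied:

    #include "common/Preprocessor.h"

    // Hypothetical per-argument macro.
    #define DECLARE_MEMBER(name) int name;

    struct Extents {
        // DAWN_PP_FOR_EACH_NARG counts 3 arguments, so DAWN_PP_FOR_EACH_3 is
        // selected and this expands to: int width; int height; int depth;
        DAWN_PP_FOR_EACH(DECLARE_MEMBER, width, height, depth)
    };

    #undef DECLARE_MEMBER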
diff --git a/chromium/third_party/dawn/src/common/SlabAllocator.cpp b/chromium/third_party/dawn/src/common/SlabAllocator.cpp
index 61948873ba8..e58a235a5dc 100644
--- a/chromium/third_party/dawn/src/common/SlabAllocator.cpp
+++ b/chromium/third_party/dawn/src/common/SlabAllocator.cpp
@@ -30,12 +30,8 @@ SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
// Slab
-SlabAllocatorImpl::Slab::Slab(std::unique_ptr<char[]> allocation, IndexLinkNode* head)
- : allocation(std::move(allocation)),
- freeList(head),
- prev(nullptr),
- next(nullptr),
- blocksInUse(0) {
+SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
+ : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
}
SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
@@ -50,7 +46,8 @@ SlabAllocatorImpl::SentinelSlab::~SentinelSlab() {
while (slab != nullptr) {
Slab* next = slab->next;
ASSERT(slab->blocksInUse == 0);
- slab->~Slab();
+ // Delete the slab's allocation. The slab is allocated inside slab->allocation.
+ delete[] slab->allocation;
slab = next;
}
}
@@ -232,8 +229,8 @@ void SlabAllocatorImpl::GetNewSlab() {
}
// TODO(enga): Use aligned_alloc with C++17.
- auto allocation = std::unique_ptr<char[]>(new char[mTotalAllocationSize]);
- char* alignedPtr = AlignPtr(allocation.get(), mAllocationAlignment);
+ char* allocation = new char[mTotalAllocationSize];
+ char* alignedPtr = AlignPtr(allocation, mAllocationAlignment);
char* dataStart = alignedPtr + mSlabBlocksOffset;
@@ -245,5 +242,5 @@ void SlabAllocatorImpl::GetNewSlab() {
IndexLinkNode* lastNode = OffsetFrom(node, mBlocksPerSlab - 1);
lastNode->nextIndex = kInvalidIndex;
- mAvailableSlabs.Prepend(new (alignedPtr) Slab(std::move(allocation), node));
+ mAvailableSlabs.Prepend(new (alignedPtr) Slab(allocation, node));
}
diff --git a/chromium/third_party/dawn/src/common/SlabAllocator.h b/chromium/third_party/dawn/src/common/SlabAllocator.h
index 939f1c029d1..8a78dde6262 100644
--- a/chromium/third_party/dawn/src/common/SlabAllocator.h
+++ b/chromium/third_party/dawn/src/common/SlabAllocator.h
@@ -18,8 +18,8 @@
#include "common/PlacementAllocated.h"
#include <cstdint>
-#include <memory>
#include <type_traits>
+#include <utility>
// The SlabAllocator allocates objects out of one or more fixed-size contiguous "slabs" of memory.
// This makes it very quick to allocate and deallocate fixed-size objects because the allocator only
@@ -77,12 +77,12 @@ class SlabAllocatorImpl {
// Ownership of the allocation is transferred to the slab on creation.
// | ---------- allocation --------- |
// | pad | Slab | data ------------> |
- Slab(std::unique_ptr<char[]> allocation, IndexLinkNode* head);
+ Slab(char allocation[], IndexLinkNode* head);
Slab(Slab&& rhs);
void Splice();
- std::unique_ptr<char[]> allocation;
+ char* allocation;
IndexLinkNode* freeList;
Slab* prev;
Slab* next;
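
A simplified sketch (names illustrative, not the real SlabAllocator internals) of the ownership pattern this change adopts: the Slab header is placement-constructed inside the allocation it tracks, so the memory is held as a raw char* and freed with delete[] during list teardown instead of through a unique_ptr member, whose deleter would otherwise run from inside an object that lives in that same buffer:

    #include <new>

    struct Header {
        explicit Header(char* allocation) : allocation(allocation) {
        }
        char* allocation;  // Raw pointer: this Header object lives inside it.
    };

    void Sketch() {
        // One contiguous allocation: the Header sits at the front, data follows.
        char* allocation = new char[1024];
        Header* header = new (allocation) Header(allocation);

        // Teardown frees the whole buffer explicitly. Header is trivially
        // destructible, so nothing else needs to run before the memory goes away.
        delete[] header->allocation;
    }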
diff --git a/chromium/third_party/dawn/src/common/VertexFormatUtils.h b/chromium/third_party/dawn/src/common/VertexFormatUtils.h
index 632ca7fba7e..06fc08101a3 100644
--- a/chromium/third_party/dawn/src/common/VertexFormatUtils.h
+++ b/chromium/third_party/dawn/src/common/VertexFormatUtils.h
@@ -19,6 +19,9 @@
#include <dawn/webgpu_cpp.h>
+// TODO(dawn:695): Remove the dawncpp_headers CMake dependency when VertexFormatUtils is deleted,
+// assuming no other dependencies have been added in other project files.
+
namespace dawn {
static constexpr std::array<wgpu::VertexFormat, 30> kAllVertexFormats = {
diff --git a/chromium/third_party/dawn/src/common/WindowsUtils.cpp b/chromium/third_party/dawn/src/common/WindowsUtils.cpp
index 0f9b9852fca..f4aef7cb3dd 100644
--- a/chromium/third_party/dawn/src/common/WindowsUtils.cpp
+++ b/chromium/third_party/dawn/src/common/WindowsUtils.cpp
@@ -30,3 +30,16 @@ std::string WCharToUTF8(const wchar_t* input) {
// This will allocate the returned std::string and then destroy result.
return std::string(result.get(), result.get() + (requiredSize - 1));
}
+
+std::wstring UTF8ToWStr(const char* input) {
+ // The -1 argument asks MultiByteToWideChar to use the null terminator to know the size of
+ // input. It will return a size that includes the null terminator.
+ int requiredSize = MultiByteToWideChar(CP_UTF8, 0, input, -1, nullptr, 0);
+
+ // When we can use C++17 this can be changed to use string.data() instead.
+ std::unique_ptr<wchar_t[]> result = std::make_unique<wchar_t[]>(requiredSize);
+ MultiByteToWideChar(CP_UTF8, 0, input, -1, result.get(), requiredSize);
+
+ // This will allocate the returned std::wstring and then destroy result.
+ return std::wstring(result.get(), result.get() + (requiredSize - 1));
+}
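
A round-trip sketch of the new UTF8ToWStr next to the existing WCharToUTF8 helper (the sample string is arbitrary):

    #include "common/WindowsUtils.h"

    #include <string>

    void ExampleRoundTrip() {
        // UTF-8 in, wide (UTF-16) out. This is what DynamicLib now uses to build
        // the path passed to LoadPackagedLibrary on UWP.
        std::wstring wide = UTF8ToWStr("dxgi.dll");

        // And back again with the pre-existing helper.
        std::string narrow = WCharToUTF8(wide.c_str());
        // narrow == "dxgi.dll"
    }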
diff --git a/chromium/third_party/dawn/src/common/WindowsUtils.h b/chromium/third_party/dawn/src/common/WindowsUtils.h
index 0c43d08aa41..3ab916bbcdc 100644
--- a/chromium/third_party/dawn/src/common/WindowsUtils.h
+++ b/chromium/third_party/dawn/src/common/WindowsUtils.h
@@ -19,4 +19,6 @@
std::string WCharToUTF8(const wchar_t* input);
+std::wstring UTF8ToWStr(const char* input);
+
#endif // COMMON_WINDOWSUTILS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
index 56a9ba557b8..7dce1021749 100644
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
@@ -65,7 +65,10 @@ namespace dawn_native {
i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
++i) {
TextureViewBase* attachment =
- descriptor->colorAttachments[static_cast<uint8_t>(i)].attachment;
+ descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
+ if (attachment == nullptr) {
+ attachment = descriptor->colorAttachments[static_cast<uint8_t>(i)].attachment;
+ }
mColorAttachmentsSet.set(i);
mColorFormats[i] = attachment->GetFormat().format;
if (mSampleCount == 0) {
@@ -75,7 +78,10 @@ namespace dawn_native {
}
}
if (descriptor->depthStencilAttachment != nullptr) {
- TextureViewBase* attachment = descriptor->depthStencilAttachment->attachment;
+ TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
+ if (attachment == nullptr) {
+ attachment = descriptor->depthStencilAttachment->attachment;
+ }
mDepthStencilFormat = attachment->GetFormat().format;
if (mSampleCount == 0) {
mSampleCount = attachment->GetTexture()->GetSampleCount();
diff --git a/chromium/third_party/dawn/src/dawn_native/BUILD.gn b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
index 803ee30246b..8e4f0c4ddfd 100644
--- a/chromium/third_party/dawn/src/dawn_native/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
@@ -91,6 +91,8 @@ config("dawn_native_vulkan_rpath") {
dawn_json_generator("dawn_native_utils_gen") {
target = "dawn_native_utils"
outputs = [
+ "src/dawn_native/ChainUtils_autogen.h",
+ "src/dawn_native/ChainUtils_autogen.cpp",
"src/dawn_native/ProcTable.cpp",
"src/dawn_native/wgpu_structs_autogen.h",
"src/dawn_native/wgpu_structs_autogen.cpp",
@@ -182,6 +184,8 @@ source_set("dawn_native_sources") {
"Buffer.h",
"CachedObject.cpp",
"CachedObject.h",
+ "CallbackTaskManager.cpp",
+ "CallbackTaskManager.h",
"CommandAllocator.cpp",
"CommandAllocator.h",
"CommandBuffer.cpp",
@@ -202,8 +206,8 @@ source_set("dawn_native_sources") {
"ComputePipeline.h",
"CopyTextureForBrowserHelper.cpp",
"CopyTextureForBrowserHelper.h",
- "CreatePipelineAsyncTracker.cpp",
- "CreatePipelineAsyncTracker.h",
+ "CreatePipelineAsyncTask.cpp",
+ "CreatePipelineAsyncTask.h",
"Device.cpp",
"Device.h",
"DynamicUploader.cpp",
@@ -307,10 +311,28 @@ source_set("dawn_native_sources") {
]
}
- if (is_win) {
+ # Only Win32 apps need to link with user32.lib.
+ # In UWP, all available APIs are defined in WindowsApp.lib.
+ if (is_win && !dawn_is_winuwp) {
libs += [ "user32.lib" ]
}
+ if (dawn_is_winuwp && is_debug) {
+ # DXGIGetDebugInterface1 is defined in dxgi.lib, but this API is tagged as a
+ # development-only capability, which implies that linking to it will cause the
+ # application to fail Windows Store certification. So we only link against it
+ # in debug builds when compiling for UWP. On Win32 we load dxgi.dll with
+ # LoadLibrary, so there is no need for static linking.
+ libs += [ "dxgi.lib" ]
+ }
+
+ # TODO(dawn:766):
+ # dxcompiler.lib and WinPixEventRuntime_UAP.lib should be linked in UWP builds.
+ # Somehow, linking dxcompiler.lib makes the CoreApp unable to activate, and
+ # WinPIX should be added as a third-party tool and linked statically.
+
if (dawn_enable_d3d12) {
libs += [ "dxguid.lib" ]
sources += [
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
index bcff274022f..4417610ec34 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
@@ -542,6 +542,10 @@ namespace dawn_native {
return mBindingMap;
}
+ bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
+ return mBindingMap.count(bindingNumber) != 0;
+ }
+
BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
ASSERT(!IsError());
const auto& it = mBindingMap.find(bindingNumber);
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
index 113c5912df4..641bf45175f 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
@@ -54,6 +54,7 @@ namespace dawn_native {
return mBindingInfo[bindingIndex];
}
const BindingMap& GetBindingMap() const;
+ bool HasBinding(BindingNumber bindingNumber) const;
BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
// Functions necessary for the unordered_set<BGLBase*>-based cache.
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
index e8bb23f50d2..3ddb72bd719 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
@@ -133,8 +133,7 @@ namespace dawn_native {
mUsage(descriptor->usage),
mState(BufferState::Unmapped) {
// Add readonly storage usage if the buffer has a storage usage. The validation rules in
- // ValidatePassResourceUsage will make sure we don't use both at the same
- // time.
+ // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
if (mUsage & wgpu::BufferUsage::Storage) {
mUsage |= kReadOnlyStorageBuffer;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.h b/chromium/third_party/dawn/src/dawn_native/Buffer.h
index 8e848da9819..ade139e3218 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.h
@@ -34,7 +34,8 @@ namespace dawn_native {
static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
- wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer;
+ wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
+ wgpu::BufferUsage::Indirect;
class BufferBase : public ObjectBase {
enum class BufferState {
diff --git a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
index ba6cec9d320..b0d470b6ed4 100644
--- a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
@@ -50,6 +50,8 @@ target_sources(dawn_native PRIVATE
"Buffer.h"
"CachedObject.cpp"
"CachedObject.h"
+ "CallbackTaskManager.cpp"
+ "CallbackTaskManager.h"
"CommandAllocator.cpp"
"CommandAllocator.h"
"CommandBuffer.cpp"
@@ -70,8 +72,8 @@ target_sources(dawn_native PRIVATE
"ComputePipeline.h"
"CopyTextureForBrowserHelper.cpp"
"CopyTextureForBrowserHelper.h"
- "CreatePipelineAsyncTracker.cpp"
- "CreatePipelineAsyncTracker.h"
+ "CreatePipelineAsyncTask.cpp"
+ "CreatePipelineAsyncTask.h"
"Device.cpp"
"Device.h"
"DynamicUploader.cpp"
diff --git a/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.cpp b/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.cpp
new file mode 100644
index 00000000000..1c9106c2610
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.cpp
@@ -0,0 +1,37 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/CallbackTaskManager.h"
+
+namespace dawn_native {
+
+ bool CallbackTaskManager::IsEmpty() {
+ std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+ return mCallbackTaskQueue.empty();
+ }
+
+ std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
+ std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+
+ std::vector<std::unique_ptr<CallbackTask>> allTasks;
+ allTasks.swap(mCallbackTaskQueue);
+ return allTasks;
+ }
+
+ void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
+ std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+ mCallbackTaskQueue.push_back(std::move(callbackTask));
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.h b/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.h
new file mode 100644
index 00000000000..1be0eb22b0e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.h
@@ -0,0 +1,47 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CALLBACK_TASK_MANAGER_H_
+#define DAWNNATIVE_CALLBACK_TASK_MANAGER_H_
+
+#include <memory>
+#include <mutex>
+#include <vector>
+
+namespace dawn_native {
+
+ class CallbackTaskManager;
+
+ struct CallbackTask {
+ public:
+ virtual ~CallbackTask() = default;
+ virtual void Finish() = 0;
+ virtual void HandleShutDown() = 0;
+ virtual void HandleDeviceLoss() = 0;
+ };
+
+ class CallbackTaskManager {
+ public:
+ void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
+ bool IsEmpty();
+ std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
+
+ private:
+ std::mutex mCallbackTaskQueueMutex;
+ std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
+ };
+
+} // namespace dawn_native
+
+#endif
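
A minimal sketch of how the new manager is intended to be used; the PrintCallbackTask type and FlushCallbacks function are hypothetical, but the calls match the interface declared above:

    #include "dawn_native/CallbackTaskManager.h"

    #include <cstdio>
    #include <memory>

    namespace {

        struct PrintCallbackTask : dawn_native::CallbackTask {
            void Finish() override {
                std::printf("operation completed\n");
            }
            void HandleShutDown() override {
                std::printf("device shut down before completion\n");
            }
            void HandleDeviceLoss() override {
                std::printf("device lost before completion\n");
            }
        };

    }  // namespace

    void FlushCallbacks(dawn_native::CallbackTaskManager* manager) {
        manager->AddCallbackTask(std::make_unique<PrintCallbackTask>());

        // AcquireCallbackTasks drains the queue under the mutex; the callbacks
        // themselves run outside the lock.
        for (auto& task : manager->AcquireCallbackTasks()) {
            task->Finish();
        }
    }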
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
index 7b6d3670782..24d834262b7 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
@@ -227,4 +227,12 @@ namespace dawn_native {
mAspects &= ~kLazyAspects;
}
+ BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
+ return mBindgroups[index];
+ }
+
+ PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
+ return mLastPipelineLayout;
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
index 84a3e6dc538..8c223cf86d2 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
@@ -50,6 +50,8 @@ namespace dawn_native {
wgpu::IndexFormat GetIndexFormat() {
return mIndexFormat;
}
+ BindGroupBase* GetBindGroup(BindGroupIndex index) const;
+ PipelineLayoutBase* GetPipelineLayout() const;
private:
MaybeError ValidateOperation(ValidationAspects requiredAspects);
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
index 55fc22778ab..05d06565f1e 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
@@ -155,7 +155,8 @@ namespace dawn_native {
}
const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
- const TextureViewBase* attachment = colorAttachment.attachment;
+ const TextureViewBase* attachment =
+ colorAttachment.view != nullptr ? colorAttachment.view : colorAttachment.attachment;
DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
wgpu::TextureUsage::RenderAttachment));
@@ -178,17 +179,13 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("The mip level count of the resolve target must be 1");
}
- uint32_t colorAttachmentBaseMipLevel = attachment->GetBaseMipLevel();
- const Extent3D& colorTextureSize = attachment->GetTexture()->GetSize();
- uint32_t colorAttachmentWidth = colorTextureSize.width >> colorAttachmentBaseMipLevel;
- uint32_t colorAttachmentHeight = colorTextureSize.height >> colorAttachmentBaseMipLevel;
-
- uint32_t resolveTargetBaseMipLevel = resolveTarget->GetBaseMipLevel();
- const Extent3D& resolveTextureSize = resolveTarget->GetTexture()->GetSize();
- uint32_t resolveTargetWidth = resolveTextureSize.width >> resolveTargetBaseMipLevel;
- uint32_t resolveTargetHeight = resolveTextureSize.height >> resolveTargetBaseMipLevel;
- if (colorAttachmentWidth != resolveTargetWidth ||
- colorAttachmentHeight != resolveTargetHeight) {
+ const Extent3D& colorTextureSize =
+ attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
+ const Extent3D& resolveTextureSize =
+ resolveTarget->GetTexture()->GetMipLevelVirtualSize(
+ resolveTarget->GetBaseMipLevel());
+ if (colorTextureSize.width != resolveTextureSize.width ||
+ colorTextureSize.height != resolveTextureSize.height) {
return DAWN_VALIDATION_ERROR(
"The size of the resolve target must be the same as the color attachment");
}
@@ -203,16 +200,33 @@ namespace dawn_native {
}
MaybeError ValidateRenderPassColorAttachment(
- const DeviceBase* device,
+ DeviceBase* device,
const RenderPassColorAttachmentDescriptor& colorAttachment,
uint32_t* width,
uint32_t* height,
uint32_t* sampleCount) {
- DAWN_TRY(device->ValidateObject(colorAttachment.attachment));
- DAWN_TRY(ValidateCanUseAs(colorAttachment.attachment->GetTexture(),
- wgpu::TextureUsage::RenderAttachment));
+ TextureViewBase* attachment;
+ if (colorAttachment.view != nullptr) {
+ if (colorAttachment.attachment != nullptr) {
+ return DAWN_VALIDATION_ERROR(
+ "Cannot specify both a attachment and view. attachment is deprecated, "
+ "favor view instead.");
+ }
+ attachment = colorAttachment.view;
+ } else if (colorAttachment.attachment != nullptr) {
+ device->EmitDeprecationWarning(
+ "RenderPassColorAttachmentDescriptor.attachment has been deprecated. Use "
+ "RenderPassColorAttachmentDescriptor.view instead.");
+ attachment = colorAttachment.attachment;
+ } else {
+ return DAWN_VALIDATION_ERROR(
+ "Must specify a view for RenderPassColorAttachmentDescriptor");
+ }
+
+ DAWN_TRY(device->ValidateObject(attachment));
+ DAWN_TRY(
+ ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment));
- const TextureViewBase* attachment = colorAttachment.attachment;
if (!(attachment->GetAspects() & Aspect::Color) ||
!attachment->GetFormat().isRenderable) {
return DAWN_VALIDATION_ERROR(
@@ -243,18 +257,35 @@ namespace dawn_native {
}
MaybeError ValidateRenderPassDepthStencilAttachment(
- const DeviceBase* device,
+ DeviceBase* device,
const RenderPassDepthStencilAttachmentDescriptor* depthStencilAttachment,
uint32_t* width,
uint32_t* height,
uint32_t* sampleCount) {
DAWN_ASSERT(depthStencilAttachment != nullptr);
- DAWN_TRY(device->ValidateObject(depthStencilAttachment->attachment));
- DAWN_TRY(ValidateCanUseAs(depthStencilAttachment->attachment->GetTexture(),
- wgpu::TextureUsage::RenderAttachment));
+ TextureViewBase* attachment;
+ if (depthStencilAttachment->view != nullptr) {
+ if (depthStencilAttachment->attachment != nullptr) {
+ return DAWN_VALIDATION_ERROR(
+ "Cannot specify both a attachment and view. attachment is deprecated, "
+ "favor view instead.");
+ }
+ attachment = depthStencilAttachment->view;
+ } else if (depthStencilAttachment->attachment != nullptr) {
+ device->EmitDeprecationWarning(
+ "RenderPassDepthStencilAttachmentDescriptor.attachment has been deprecated. "
+ "Use RenderPassDepthStencilAttachmentDescriptor.view instead.");
+ attachment = depthStencilAttachment->attachment;
+ } else {
+ return DAWN_VALIDATION_ERROR(
+ "Must specify a view for RenderPassDepthStencilAttachmentDescriptor");
+ }
+
+ DAWN_TRY(device->ValidateObject(attachment));
+ DAWN_TRY(
+ ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment));
- const TextureViewBase* attachment = depthStencilAttachment->attachment;
if ((attachment->GetAspects() & (Aspect::Depth | Aspect::Stencil)) == Aspect::None ||
!attachment->GetFormat().isRenderable) {
return DAWN_VALIDATION_ERROR(
@@ -313,7 +344,7 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateRenderPassDescriptor(const DeviceBase* device,
+ MaybeError ValidateRenderPassDescriptor(DeviceBase* device,
const RenderPassDescriptor* descriptor,
uint32_t* width,
uint32_t* height,
@@ -335,14 +366,6 @@ namespace dawn_native {
if (descriptor->occlusionQuerySet != nullptr) {
DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
- // Occlusion query has not been implemented completely. Disallow it as unsafe until
- // the implementaion is completed.
- if (device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
- return DAWN_VALIDATION_ERROR(
- "Occlusion query is disallowed because it has not been implemented "
- "completely.");
- }
-
if (descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion) {
return DAWN_VALIDATION_ERROR("The type of query set must be Occlusion");
}
@@ -401,6 +424,7 @@ namespace dawn_native {
MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
QuerySetBase* querySet,
+ uint32_t firstQuery,
uint32_t queryCount,
BufferBase* destination,
uint64_t destinationOffset) {
@@ -416,27 +440,29 @@ namespace dawn_native {
BufferDescriptor availabilityDesc = {};
availabilityDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
availabilityDesc.size = querySet->GetQueryCount() * sizeof(uint32_t);
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<BufferBase> availabilityBuffer =
- AcquireRef(device->APICreateBuffer(&availabilityDesc));
+ Ref<BufferBase> availabilityBuffer;
+ DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));
+
DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0,
availability.data(),
availability.size() * sizeof(uint32_t)));
// Timestamp params uniform buffer
- TimestampParams params = {queryCount, static_cast<uint32_t>(destinationOffset),
+ TimestampParams params = {firstQuery, queryCount,
+ static_cast<uint32_t>(destinationOffset),
device->GetTimestampPeriodInNS()};
+
BufferDescriptor parmsDesc = {};
parmsDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
parmsDesc.size = sizeof(params);
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<BufferBase> paramsBuffer = AcquireRef(device->APICreateBuffer(&parmsDesc));
+ Ref<BufferBase> paramsBuffer;
+ DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));
+
DAWN_TRY(
device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
- EncodeConvertTimestampsToNanoseconds(encoder, destination, availabilityBuffer.Get(),
- paramsBuffer.Get());
- return {};
+ return EncodeConvertTimestampsToNanoseconds(
+ encoder, destination, availabilityBuffer.Get(), paramsBuffer.Get());
}
} // namespace
@@ -446,9 +472,9 @@ namespace dawn_native {
}
CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
- return CommandBufferResourceUsage{mEncodingContext.AcquirePassUsages(),
- std::move(mTopLevelBuffers), std::move(mTopLevelTextures),
- std::move(mUsedQuerySets)};
+ return CommandBufferResourceUsage{
+ mEncodingContext.AcquireRenderPassUsages(), mEncodingContext.AcquireComputePassUsages(),
+ std::move(mTopLevelBuffers), std::move(mTopLevelTextures), std::move(mUsedQuerySets)};
}
CommandIterator CommandEncoder::AcquireCommands() {
@@ -498,7 +524,7 @@ namespace dawn_native {
RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
DeviceBase* device = GetDevice();
- PassResourceUsageTracker usageTracker(PassType::Render);
+ RenderPassResourceUsageTracker usageTracker;
uint32_t width = 0;
uint32_t height = 0;
@@ -521,7 +547,10 @@ namespace dawn_native {
for (ColorAttachmentIndex index :
IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
uint8_t i = static_cast<uint8_t>(index);
- TextureViewBase* view = descriptor->colorAttachments[i].attachment;
+ TextureViewBase* view = descriptor->colorAttachments[i].view;
+ if (view == nullptr) {
+ view = descriptor->colorAttachments[i].attachment;
+ }
TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
cmd->colorAttachments[index].view = view;
@@ -540,7 +569,10 @@ namespace dawn_native {
}
if (cmd->attachmentState->HasDepthStencilAttachment()) {
- TextureViewBase* view = descriptor->depthStencilAttachment->attachment;
+ TextureViewBase* view = descriptor->depthStencilAttachment->view;
+ if (view == nullptr) {
+ view = descriptor->depthStencilAttachment->attachment;
+ }
cmd->depthStencilAttachment.view = view;
cmd->depthStencilAttachment.clearDepth =
@@ -623,14 +655,11 @@ namespace dawn_native {
const ImageCopyTexture* destination,
const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- Extent3D fixedCopySize = *copySize;
- DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedCopySize));
-
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
DAWN_TRY(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc));
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, fixedCopySize));
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
@@ -639,26 +668,26 @@ namespace dawn_native {
// because in the latter we divide copyExtent.width by blockWidth and
// copyExtent.height by blockHeight while the divisibility conditions are
// checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, fixedCopySize));
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
}
const TexelBlockInfo& blockInfo =
destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
TextureDataLayout srcLayout = FixUpDeprecatedTextureDataLayoutOptions(
- GetDevice(), source->layout, blockInfo, fixedCopySize);
+ GetDevice(), source->layout, blockInfo, *copySize);
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(ValidateLinearTextureCopyOffset(srcLayout, blockInfo));
DAWN_TRY(ValidateLinearTextureData(srcLayout, source->buffer->GetSize(), blockInfo,
- fixedCopySize));
+ *copySize));
mTopLevelBuffers.insert(source->buffer);
mTopLevelTextures.insert(destination->texture);
}
- ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, fixedCopySize);
+ ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
// Skip noop copies.
- if (fixedCopySize.width != 0 && fixedCopySize.height != 0 &&
- fixedCopySize.depthOrArrayLayers != 0) {
+ if (copySize->width != 0 && copySize->height != 0 &&
+ copySize->depthOrArrayLayers != 0) {
// Record the copy command.
CopyBufferToTextureCmd* copy =
allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
@@ -671,7 +700,7 @@ namespace dawn_native {
copy->destination.mipLevel = destination->mipLevel;
copy->destination.aspect =
ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = fixedCopySize;
+ copy->copySize = *copySize;
}
return {};
@@ -682,11 +711,8 @@ namespace dawn_native {
const ImageCopyBuffer* destination,
const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- Extent3D fixedCopySize = *copySize;
- DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedCopySize));
-
if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, fixedCopySize));
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
@@ -698,26 +724,26 @@ namespace dawn_native {
// because in the latter we divide copyExtent.width by blockWidth and
// copyExtent.height by blockHeight while the divisibility conditions are
// checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, fixedCopySize));
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
}
const TexelBlockInfo& blockInfo =
source->texture->GetFormat().GetAspectInfo(source->aspect).block;
TextureDataLayout dstLayout = FixUpDeprecatedTextureDataLayoutOptions(
- GetDevice(), destination->layout, blockInfo, fixedCopySize);
+ GetDevice(), destination->layout, blockInfo, *copySize);
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(ValidateLinearTextureCopyOffset(dstLayout, blockInfo));
DAWN_TRY(ValidateLinearTextureData(dstLayout, destination->buffer->GetSize(),
- blockInfo, fixedCopySize));
+ blockInfo, *copySize));
mTopLevelTextures.insert(source->texture);
mTopLevelBuffers.insert(destination->buffer);
}
- ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, fixedCopySize);
+ ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
// Skip noop copies.
- if (fixedCopySize.width != 0 && fixedCopySize.height != 0 &&
- fixedCopySize.depthOrArrayLayers != 0) {
+ if (copySize->width != 0 && copySize->height != 0 &&
+ copySize->depthOrArrayLayers != 0) {
// Record the copy command.
CopyTextureToBufferCmd* copy =
allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
@@ -729,7 +755,7 @@ namespace dawn_native {
copy->destination.offset = dstLayout.offset;
copy->destination.bytesPerRow = dstLayout.bytesPerRow;
copy->destination.rowsPerImage = dstLayout.rowsPerImage;
- copy->copySize = fixedCopySize;
+ copy->copySize = *copySize;
}
return {};
@@ -740,20 +766,18 @@ namespace dawn_native {
const ImageCopyTexture* destination,
const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- Extent3D fixedCopySize = *copySize;
- DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedCopySize));
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(source->texture));
DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, fixedCopySize));
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, fixedCopySize));
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
DAWN_TRY(
- ValidateTextureToTextureCopyRestrictions(*source, *destination, fixedCopySize));
+ ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, fixedCopySize));
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, fixedCopySize));
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
@@ -763,8 +787,8 @@ namespace dawn_native {
}
// Skip noop copies.
- if (fixedCopySize.width != 0 && fixedCopySize.height != 0 &&
- fixedCopySize.depthOrArrayLayers != 0) {
+ if (copySize->width != 0 && copySize->height != 0 &&
+ copySize->depthOrArrayLayers != 0) {
CopyTextureToTextureCmd* copy =
allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
copy->source.texture = source->texture;
@@ -776,7 +800,7 @@ namespace dawn_native {
copy->destination.mipLevel = destination->mipLevel;
copy->destination.aspect =
ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = fixedCopySize;
+ copy->copySize = *copySize;
}
return {};
@@ -785,7 +809,7 @@ namespace dawn_native {
void CommandEncoder::APIInjectValidationError(const char* message) {
if (mEncodingContext.CheckCurrentEncoder(this)) {
- mEncodingContext.HandleError(InternalErrorType::Validation, message);
+ mEncodingContext.HandleError(DAWN_VALIDATION_ERROR(message));
}
}
@@ -860,8 +884,8 @@ namespace dawn_native {
// Encode internal compute pipeline for timestamp query
if (querySet->GetQueryType() == wgpu::QueryType::Timestamp) {
- DAWN_TRY(EncodeTimestampsToNanosecondsConversion(this, querySet, queryCount, destination,
- destinationOffset));
+ DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
+ this, querySet, firstQuery, queryCount, destination, destinationOffset));
}
return {};
@@ -906,20 +930,24 @@ namespace dawn_native {
DAWN_TRY(device->ValidateIsAlive());
if (device->IsValidationEnabled()) {
- DAWN_TRY(
- ValidateFinish(mEncodingContext.GetIterator(), mEncodingContext.GetPassUsages()));
+ DAWN_TRY(ValidateFinish());
}
return device->CreateCommandBuffer(this, descriptor);
}
// Implementation of the command buffer validation that can be precomputed before submit
- MaybeError CommandEncoder::ValidateFinish(CommandIterator* commands,
- const PerPassUsages& perPassUsages) const {
+ MaybeError CommandEncoder::ValidateFinish() const {
TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
DAWN_TRY(GetDevice()->ValidateObject(this));
- for (const PassResourceUsage& passUsage : perPassUsages) {
- DAWN_TRY(ValidatePassResourceUsage(passUsage));
+ for (const RenderPassResourceUsage& passUsage : mEncodingContext.GetRenderPassUsages()) {
+ DAWN_TRY(ValidateSyncScopeResourceUsage(passUsage));
+ }
+
+ for (const ComputePassResourceUsage& passUsage : mEncodingContext.GetComputePassUsages()) {
+ for (const SyncScopeResourceUsage& scope : passUsage.dispatchUsages) {
+ DAWN_TRY(ValidateSyncScopeResourceUsage(scope));
+ }
}
if (mDebugGroupStackSize != 0) {
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
index a8bf6a0d288..13678f17ce0 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
@@ -73,8 +73,7 @@ namespace dawn_native {
ResultOrError<Ref<CommandBufferBase>> FinishInternal(
const CommandBufferDescriptor* descriptor);
- MaybeError ValidateFinish(CommandIterator* commands,
- const PerPassUsages& perPassUsages) const;
+ MaybeError ValidateFinish() const;
EncodingContext mEncodingContext;
std::set<BufferBase*> mTopLevelBuffers;
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
index 1e2fc38d3da..ba70ce713ad 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
@@ -28,39 +28,31 @@
namespace dawn_native {
- // Performs the per-pass usage validation checks
- // This will eventually need to differentiate between render and compute passes.
- // It will be valid to use a buffer both as uniform and storage in the same compute pass.
- // TODO(yunchao.he@intel.com): add read/write usage tracking for compute
- MaybeError ValidatePassResourceUsage(const PassResourceUsage& pass) {
- // TODO(cwallez@chromium.org): Remove this special casing once the PassResourceUsage is a
- // SyncScopeResourceUsage.
- if (pass.passType != PassType::Render) {
- return {};
- }
-
+ // Performs validation of the "synchronization scope" rules of WebGPU.
+ MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
// Buffers can only be used as single-write or multiple read.
- for (size_t i = 0; i < pass.buffers.size(); ++i) {
- wgpu::BufferUsage usage = pass.bufferUsages[i];
+ for (wgpu::BufferUsage usage : scope.bufferUsages) {
bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
bool singleUse = wgpu::HasZeroOrOneBits(usage);
if (!readOnly && !singleUse) {
return DAWN_VALIDATION_ERROR(
- "Buffer used as writable usage and another usage in pass");
+ "Buffer used as writable usage and another usage in the same synchronization "
+ "scope");
}
}
// Check that every single subresource is used as either a single-write usage or a
// combination of readonly usages.
- for (const PassTextureUsage& textureUsage : pass.textureUsages) {
+ for (const TextureSubresourceUsage& textureUsage : scope.textureUsages) {
MaybeError error = {};
textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
bool singleUse = wgpu::HasZeroOrOneBits(usage);
if (!readOnly && !singleUse && !error.IsError()) {
error = DAWN_VALIDATION_ERROR(
- "Texture used as writable usage and another usage in render pass");
+ "Texture used as writable usage and another usage in the same "
+ "synchronization scope");
}
});
DAWN_TRY(std::move(error));
@@ -429,12 +421,13 @@ namespace dawn_native {
}
if (src.texture == dst.texture && src.mipLevel == dst.mipLevel) {
- ASSERT(src.texture->GetDimension() == wgpu::TextureDimension::e2D &&
- dst.texture->GetDimension() == wgpu::TextureDimension::e2D);
- if (IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers)) {
+ wgpu::TextureDimension dimension = src.texture->GetDimension();
+ ASSERT(dimension != wgpu::TextureDimension::e1D);
+ if ((dimension == wgpu::TextureDimension::e2D &&
+ IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers)) ||
+ dimension == wgpu::TextureDimension::e3D) {
return DAWN_VALIDATION_ERROR(
- "Copy subresources cannot be overlapped when copying within the same "
- "texture.");
+ "Cannot copy between overlapping subresources of the same texture.");
}
}
@@ -462,8 +455,8 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Source texture must have sampled usage");
}
- if (!(dst.texture->GetUsage() & wgpu::TextureUsage::OutputAttachment)) {
- return DAWN_VALIDATION_ERROR("Dest texture must have outputAttachment usage");
+ if (!(dst.texture->GetUsage() & wgpu::TextureUsage::RenderAttachment)) {
+ return DAWN_VALIDATION_ERROR("Dest texture must have RenderAttachment usage");
}
return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
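
The ValidateSyncScopeResourceUsage hunk above reduces to one rule: within a single synchronization scope, the union of a resource's usages must be either entirely read-only or a single usage bit. A minimal, self-contained sketch of that bit test (the usage constants below are illustrative stand-ins, not Dawn's real wgpu::BufferUsage values):

    #include <cstdint>

    // Illustrative usage bits; Dawn's actual wgpu::BufferUsage flags differ.
    constexpr uint32_t kMapRead      = 0x1;
    constexpr uint32_t kCopySrc      = 0x2;
    constexpr uint32_t kUniform      = 0x4;
    constexpr uint32_t kStorage      = 0x8;
    constexpr uint32_t kReadOnlyMask = kMapRead | kCopySrc | kUniform;

    // Valid inside one synchronization scope if the combined usage is entirely
    // read-only, or contains at most one bit (a single write usage alone is allowed).
    bool IsValidSyncScopeUsage(uint32_t usage) {
        bool readOnly  = (usage & ~kReadOnlyMask) == 0;  // like IsSubset(usage, kReadOnly...)
        bool singleUse = (usage & (usage - 1)) == 0;     // like wgpu::HasZeroOrOneBits(usage)
        return readOnly || singleUse;
    }

For example, kStorage | kUniform is rejected (a writable usage combined with another usage), while kCopySrc | kUniform and kStorage alone both pass.
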
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
index 9c8a42f5ce4..7b5fce2bbab 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
@@ -24,10 +24,10 @@
namespace dawn_native {
class QuerySetBase;
- struct PassResourceUsage;
+ struct SyncScopeResourceUsage;
struct TexelBlockInfo;
- MaybeError ValidatePassResourceUsage(const PassResourceUsage& usage);
+ MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);
MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex);
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.cpp b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
index eba44c6989a..e3f852e2a31 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
@@ -168,9 +168,9 @@ namespace dawn_native {
cmd->~SetScissorRectCmd();
break;
}
- case Command::SetBlendColor: {
- SetBlendColorCmd* cmd = commands->NextCommand<SetBlendColorCmd>();
- cmd->~SetBlendColorCmd();
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = commands->NextCommand<SetBlendConstantCmd>();
+ cmd->~SetBlendConstantCmd();
break;
}
case Command::SetBindGroup: {
@@ -315,8 +315,8 @@ namespace dawn_native {
commands->NextCommand<SetScissorRectCmd>();
break;
- case Command::SetBlendColor:
- commands->NextCommand<SetBlendColorCmd>();
+ case Command::SetBlendConstant:
+ commands->NextCommand<SetBlendConstantCmd>();
break;
case Command::SetBindGroup: {
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.h b/chromium/third_party/dawn/src/dawn_native/Commands.h
index aa93c835c7e..3c958fa03a0 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.h
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.h
@@ -59,7 +59,7 @@ namespace dawn_native {
SetStencilReference,
SetViewport,
SetScissorRect,
- SetBlendColor,
+ SetBlendConstant,
SetBindGroup,
SetIndexBuffer,
SetVertexBuffer,
@@ -231,7 +231,7 @@ namespace dawn_native {
uint32_t x, y, width, height;
};
- struct SetBlendColorCmd {
+ struct SetBlendConstantCmd {
Color color;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp
index f3196541d63..8e1b79cf5f2 100644
--- a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp
@@ -44,27 +44,64 @@ namespace dawn_native {
void OwnedCompilationMessages::AddMessage(std::string message,
wgpu::CompilationMessageType type,
uint64_t lineNum,
- uint64_t linePos) {
+ uint64_t linePos,
+ uint64_t offset,
+ uint64_t length) {
// Cannot add messages after GetCompilationInfo has been called.
ASSERT(mCompilationInfo.messages == nullptr);
mMessageStrings.push_back(message);
- mMessages.push_back(
- {nullptr, static_cast<WGPUCompilationMessageType>(type), lineNum, linePos});
+ mMessages.push_back({nullptr, static_cast<WGPUCompilationMessageType>(type), lineNum,
+ linePos, offset, length});
}
void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
// Cannot add messages after GetCompilationInfo has been called.
ASSERT(mCompilationInfo.messages == nullptr);
+ // Tint line and column values are 1-based.
+ uint64_t lineNum = diagnostic.source.range.begin.line;
+ uint64_t linePos = diagnostic.source.range.begin.column;
+ // The offset is 0-based.
+ uint64_t offset = 0;
+ uint64_t length = 0;
+
+ if (lineNum && linePos && diagnostic.source.file_content) {
+ const std::vector<std::string>& lines = diagnostic.source.file_content->lines;
+ size_t i = 0;
+ // To find the offset of the message position, loop through each of the first lineNum-1
+ // lines and add its length (+1 to account for the line break) to the offset.
+ for (; i < lineNum - 1; ++i) {
+ offset += lines[i].length() + 1;
+ }
+
+ // If the range ends on a different line than it begins, add the length of the
+ // lines in between to the ending offset.
+ uint64_t endLineNum = diagnostic.source.range.end.line;
+ uint64_t endLinePos = diagnostic.source.range.end.column;
+ uint64_t endOffset = offset;
+ for (; i < endLineNum - 1; ++i) {
+ endOffset += lines[i].length() + 1;
+ }
+
+ // Add the line positions to the offset and endOffset to get their final positions
+ // within the code string.
+ offset += linePos - 1;
+ endOffset += endLinePos - 1;
+
+ // The length of the message is the difference between the starting offset and the
+ // ending offset.
+ length = endOffset - offset;
+ }
+
if (diagnostic.code) {
mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
} else {
mMessageStrings.push_back(diagnostic.message);
}
- mMessages.push_back({nullptr, tintSeverityToMessageType(diagnostic.severity),
- diagnostic.source.range.begin.line,
- diagnostic.source.range.begin.column});
+
+ mMessages.push_back({nullptr, tintSeverityToMessageType(diagnostic.severity), lineNum,
+ linePos, offset, length});
}
void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
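
The AddMessage overload above converts Tint's 1-based line/column range into a 0-based character offset and a length over the original source. The same arithmetic in isolation, assuming the source is available as a vector of lines without trailing newlines (a simplified stand-in for the diagnostic's file_content->lines):

    #include <cstdint>
    #include <string>
    #include <utility>
    #include <vector>

    // Returns {offset, length} for a 1-based (line, column) range. Each line
    // before the range start contributes its length plus one for the '\n'.
    std::pair<uint64_t, uint64_t> RangeToOffsetAndLength(
        const std::vector<std::string>& lines,
        uint64_t beginLine, uint64_t beginCol,
        uint64_t endLine, uint64_t endCol) {
        uint64_t offset = 0;
        uint64_t i = 0;
        for (; i < beginLine - 1; ++i) {
            offset += lines[i].length() + 1;
        }
        // Keep walking from the start line to accumulate the end offset.
        uint64_t endOffset = offset;
        for (; i < endLine - 1; ++i) {
            endOffset += lines[i].length() + 1;
        }
        offset += beginCol - 1;
        endOffset += endCol - 1;
        return {offset, endOffset - offset};
    }

For the two-line source {"let x", "oops"}, a diagnostic spanning line 2, columns 1 through 5, maps to offset 6 and length 4.
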
diff --git a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h
index 02c449d0c0e..8a681e60313 100644
--- a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h
+++ b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h
@@ -37,7 +37,9 @@ namespace dawn_native {
void AddMessage(std::string message,
wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
uint64_t lineNum = 0,
- uint64_t linePos = 0);
+ uint64_t linePos = 0,
+ uint64_t offset = 0,
+ uint64_t length = 0);
void AddMessage(const tint::diag::Diagnostic& diagnostic);
void AddMessages(const tint::diag::List& diagnostics);
void ClearMessages();
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
index 04bb8ff56fb..88c7e69950f 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
@@ -20,6 +20,7 @@
#include "dawn_native/Commands.h"
#include "dawn_native/ComputePipeline.h"
#include "dawn_native/Device.h"
+#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/QuerySet.h"
namespace dawn_native {
@@ -27,15 +28,14 @@ namespace dawn_native {
ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
CommandEncoder* commandEncoder,
EncodingContext* encodingContext)
- : ProgrammablePassEncoder(device, encodingContext, PassType::Compute),
- mCommandEncoder(commandEncoder) {
+ : ProgrammablePassEncoder(device, encodingContext), mCommandEncoder(commandEncoder) {
}
ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
CommandEncoder* commandEncoder,
EncodingContext* encodingContext,
ErrorTag errorTag)
- : ProgrammablePassEncoder(device, encodingContext, errorTag, PassType::Compute),
+ : ProgrammablePassEncoder(device, encodingContext, errorTag),
mCommandEncoder(commandEncoder) {
}
@@ -65,14 +65,13 @@ namespace dawn_native {
DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
}
- // Skip noop dispatch. It is a workaround for system crashes on 0 dispatches on some
- // platforms.
- if (x != 0 && y != 0 && z != 0) {
- DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
- dispatch->x = x;
- dispatch->y = y;
- dispatch->z = z;
- }
+ // Record the synchronization scope for Dispatch, which is just the current bindgroups.
+ AddDispatchSyncScope();
+
+ DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
+ dispatch->x = x;
+ dispatch->y = y;
+ dispatch->z = z;
return {};
});
@@ -106,13 +105,18 @@ namespace dawn_native {
}
}
+ // Record the synchronization scope for Dispatch, both the bindgroups and the indirect
+ // buffer.
+ SyncScopeUsageTracker scope;
+ scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+ mUsageTracker.AddReferencedBuffer(indirectBuffer);
+ AddDispatchSyncScope(std::move(scope));
+
DispatchIndirectCmd* dispatch =
allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
dispatch->indirectBuffer = indirectBuffer;
dispatch->indirectOffset = indirectOffset;
- mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
-
return {};
});
}
@@ -133,6 +137,27 @@ namespace dawn_native {
});
}
+ void ComputePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ BindGroupIndex groupIndex(groupIndexIn);
+
+ if (IsValidationEnabled()) {
+ DAWN_TRY(
+ ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
+ }
+
+ mUsageTracker.AddResourcesReferencedByBindGroup(group);
+
+ RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+ mCommandBufferState.SetBindGroup(groupIndex, group);
+
+ return {};
+ });
+ }
+
void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
@@ -151,4 +176,12 @@ namespace dawn_native {
});
}
+ void ComputePassEncoder::AddDispatchSyncScope(SyncScopeUsageTracker scope) {
+ PipelineLayoutBase* layout = mCommandBufferState.GetPipelineLayout();
+ for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ scope.AddBindGroup(mCommandBufferState.GetBindGroup(i));
+ }
+ mUsageTracker.AddDispatch(scope.AcquireSyncScopeUsage());
+ }
+
} // namespace dawn_native
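
With the ComputePassEncoder changes above, resource usage is no longer tracked per pass but per dispatch: every Dispatch/DispatchIndirect snapshots the currently bound bind groups (plus the indirect buffer, if any) into its own synchronization scope. A toy model of that bookkeeping, with simplified types standing in for SyncScopeUsageTracker and ComputePassResourceUsageTracker:

    #include <cstdint>
    #include <map>
    #include <vector>

    // Toy model only: a "scope" is just the set of (binding slot -> group id)
    // pairs visible to one dispatch, mirroring how AddDispatchSyncScope walks
    // the pipeline layout's bind group mask.
    struct ToyComputePass {
        std::map<uint32_t, uint32_t> boundGroups;             // slot -> bind group id
        std::vector<std::map<uint32_t, uint32_t>> dispatches; // one scope per dispatch

        void SetBindGroup(uint32_t slot, uint32_t groupId) { boundGroups[slot] = groupId; }
        void Dispatch() { dispatches.push_back(boundGroups); }
    };

Because each dispatch gets its own scope, the same buffer may be used as read-only storage in one dispatch and as writable storage in a later dispatch of the same pass; only the usages within one scope are checked against the single-write rule.
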
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
index fcff7a9f965..cbf4f612906 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
@@ -15,11 +15,15 @@
#ifndef DAWNNATIVE_COMPUTEPASSENCODER_H_
#define DAWNNATIVE_COMPUTEPASSENCODER_H_
+#include "dawn_native/CommandBufferStateTracker.h"
#include "dawn_native/Error.h"
+#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/ProgrammablePassEncoder.h"
namespace dawn_native {
+ class SyncScopeUsageTracker;
+
class ComputePassEncoder final : public ProgrammablePassEncoder {
public:
ComputePassEncoder(DeviceBase* device,
@@ -36,6 +40,11 @@ namespace dawn_native {
void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
void APISetPipeline(ComputePipelineBase* pipeline);
+ void APISetBindGroup(uint32_t groupIndex,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount = 0,
+ const uint32_t* dynamicOffsets = nullptr);
+
void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
protected:
@@ -45,6 +54,13 @@ namespace dawn_native {
ErrorTag errorTag);
private:
+ CommandBufferStateTracker mCommandBufferState;
+
+ // Adds the bindgroups used for the current dispatch to the SyncScopeResourceUsage and
+ // records it in mUsageTracker.
+ void AddDispatchSyncScope(SyncScopeUsageTracker scope = {});
+ ComputePassResourceUsageTracker mUsageTracker;
+
// For render and compute passes, the encoding context is borrowed from the command encoder.
// Keep a reference to the encoder to make sure the context isn't freed.
Ref<CommandEncoder> mCommandEncoder;
diff --git a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
index 1dc232989d1..4ed5b7d05c2 100644
--- a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
@@ -38,16 +38,23 @@ namespace dawn_native {
u_scale : vec2<f32>;
u_offset : vec2<f32>;
};
+ [[binding(0), group(0)]] var<uniform> uniforms : Uniforms;
+
const texcoord : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
vec2<f32>(-0.5, 0.0),
vec2<f32>( 1.5, 0.0),
vec2<f32>( 0.5, 2.0));
- [[location(0)]] var<out> v_texcoord: vec2<f32>;
- [[builtin(position)]] var<out> Position : vec4<f32>;
- [[builtin(vertex_index)]] var<in> VertexIndex : u32;
- [[binding(0), group(0)]] var<uniform> uniforms : Uniforms;
- [[stage(vertex)]] fn main() -> void {
- Position = vec4<f32>((texcoord[VertexIndex] * 2.0 - vec2<f32>(1.0, 1.0)), 0.0, 1.0);
+
+ struct VertexOutputs {
+ [[location(0)]] texcoords : vec2<f32>;
+ [[builtin(position)]] position : vec4<f32>;
+ };
+
+ [[stage(vertex)]] fn main(
+ [[builtin(vertex_index)]] VertexIndex : u32
+ ) -> VertexOutputs {
+ var output : VertexOutputs;
+ output.position = vec4<f32>((texcoord[VertexIndex] * 2.0 - vec2<f32>(1.0, 1.0)), 0.0, 1.0);
// The Y component of the scale is computed as copySizeHeight / textureHeight. Only
// the flipY case can produce a negative number.
@@ -59,33 +66,38 @@ namespace dawn_native {
// We need the mirror positions (mirrored around y = 0.5) in the flip cases.
// Applying the transform to the src texture and then mapping it to the triangle
// coords with a +1 shift on the Y dimension gives us that mirror position.
- v_texcoord = (texcoord[VertexIndex] * uniforms.u_scale + uniforms.u_offset) *
- vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0);
+ output.texcoords = (texcoord[VertexIndex] * uniforms.u_scale + uniforms.u_offset) *
+ vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0);
} else {
// For the normal case, we need to get the exact position.
// So mapping texture to triangle firstly then adopt the transform.
- v_texcoord = (texcoord[VertexIndex] *
- vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0)) *
- uniforms.u_scale + uniforms.u_offset;
+ output.texcoords = (texcoord[VertexIndex] *
+ vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0)) *
+ uniforms.u_scale + uniforms.u_offset;
}
+
+ return output;
}
)";
static const char sCopyTextureForBrowserFragment[] = R"(
[[binding(1), group(0)]] var mySampler: sampler;
[[binding(2), group(0)]] var myTexture: texture_2d<f32>;
- [[location(0)]] var<in> v_texcoord : vec2<f32>;
- [[location(0)]] var<out> outputColor : vec4<f32>;
- [[stage(fragment)]] fn main() -> void {
+
+ [[stage(fragment)]] fn main(
+ [[location(0)]] texcoord : vec2<f32>
+ ) -> [[location(0)]] vec4<f32> {
// Clamp the texcoord and discard the out-of-bound pixels.
var clampedTexcoord : vec2<f32> =
- clamp(v_texcoord, vec2<f32>(0.0, 0.0), vec2<f32>(1.0, 1.0));
- if (all(clampedTexcoord == v_texcoord)) {
- var srcColor : vec4<f32> = textureSample(myTexture, mySampler, v_texcoord);
- // Swizzling of texture formats when sampling / rendering is handled by the
- // hardware so we don't need special logic in this shader. This is covered by tests.
- outputColor = srcColor;
+ clamp(texcoord, vec2<f32>(0.0, 0.0), vec2<f32>(1.0, 1.0));
+ if (!all(clampedTexcoord == texcoord)) {
+ discard;
}
+
+ var srcColor : vec4<f32> = textureSample(myTexture, mySampler, texcoord);
+ // Swizzling of texture formats when sampling / rendering is handled by the
+ // hardware so we don't need special logic in this shader. This is covered by tests.
+ return srcColor;
}
)";
@@ -130,6 +142,30 @@ namespace dawn_native {
return {};
}
+ MaybeError ValidateSourceOriginAndCopyExtent(const ImageCopyTexture source,
+ const Extent3D copySize) {
+ if (source.origin.z > 0) {
+ return DAWN_VALIDATION_ERROR("Source origin cannot have non-zero z value");
+ }
+
+ if (copySize.depthOrArrayLayers > 1) {
+ return DAWN_VALIDATION_ERROR("Cannot copy to multiple slices");
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateSourceAndDestinationTextureSampleCount(
+ const ImageCopyTexture source,
+ const ImageCopyTexture destination) {
+ if (source.texture->GetSampleCount() > 1 || destination.texture->GetSampleCount() > 1) {
+ return DAWN_VALIDATION_ERROR(
+ "Source and destination textures cannot be multisampled");
+ }
+
+ return {};
+ }
+
RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store,
wgpu::TextureFormat dstFormat) {
auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
@@ -139,7 +175,7 @@ namespace dawn_native {
return nullptr;
}
- RenderPipelineBase* GetOrCreateCopyTextureForBrowserPipeline(
+ ResultOrError<RenderPipelineBase*> GetOrCreateCopyTextureForBrowserPipeline(
DeviceBase* device,
wgpu::TextureFormat dstFormat) {
InternalPipelineStore* store = device->GetInternalPipelineStore();
@@ -152,9 +188,8 @@ namespace dawn_native {
wgslDesc.source = sCopyTextureForBrowserVertex;
descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- store->copyTextureForBrowserVS =
- AcquireRef(device->APICreateShaderModule(&descriptor));
+ DAWN_TRY_ASSIGN(store->copyTextureForBrowserVS,
+ device->CreateShaderModule(&descriptor));
}
ShaderModuleBase* vertexModule = store->copyTextureForBrowserVS.Get();
@@ -165,9 +200,8 @@ namespace dawn_native {
ShaderModuleWGSLDescriptor wgslDesc;
wgslDesc.source = sCopyTextureForBrowserFragment;
descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- store->copyTextureForBrowserFS =
- AcquireRef(device->APICreateShaderModule(&descriptor));
+ DAWN_TRY_ASSIGN(store->copyTextureForBrowserFS,
+ device->CreateShaderModule(&descriptor));
}
ShaderModuleBase* fragmentModule = store->copyTextureForBrowserFS.Get();
@@ -200,9 +234,9 @@ namespace dawn_native {
fragment.targetCount = 1;
fragment.targets = &target;
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- store->copyTextureForBrowserPipelines.insert(
- {dstFormat, AcquireRef(device->APICreateRenderPipeline2(&renderPipelineDesc))});
+ Ref<RenderPipelineBase> pipeline;
+ DAWN_TRY_ASSIGN(pipeline, device->CreateRenderPipeline(&renderPipelineDesc));
+ store->copyTextureForBrowserPipelines.insert({dstFormat, std::move(pipeline)});
}
return GetCachedPipeline(store, dstFormat);
@@ -221,13 +255,18 @@ namespace dawn_native {
DAWN_TRY(ValidateImageCopyTexture(device, *source, *copySize));
DAWN_TRY(ValidateImageCopyTexture(device, *destination, *copySize));
+ DAWN_TRY(ValidateSourceOriginAndCopyExtent(*source, *copySize));
DAWN_TRY(ValidateCopyTextureForBrowserRestrictions(*source, *destination, *copySize));
+ DAWN_TRY(ValidateSourceAndDestinationTextureSampleCount(*source, *destination));
DAWN_TRY(ValidateTextureCopyRange(device, *source, *copySize));
DAWN_TRY(ValidateTextureCopyRange(device, *destination, *copySize));
DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::Sampled));
+
DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::RenderAttachment));
DAWN_TRY(ValidateCopyTextureFormatConversion(source->texture->GetFormat().format,
destination->texture->GetFormat().format));
@@ -245,12 +284,18 @@ namespace dawn_native {
// TODO(shaobo.yan@intel.com): In D3D12 and Vulkan, compatible texture formats can be
// copied to each other directly. This could be a potential fast path.
- RenderPipelineBase* pipeline = GetOrCreateCopyTextureForBrowserPipeline(
- device, destination->texture->GetFormat().format);
+ // Noop copy
+ if (copySize->width == 0 || copySize->height == 0 || copySize->depthOrArrayLayers == 0) {
+ return {};
+ }
+
+ RenderPipelineBase* pipeline;
+ DAWN_TRY_ASSIGN(pipeline, GetOrCreateCopyTextureForBrowserPipeline(
+ device, destination->texture->GetFormat().format));
// Prepare bind group layout.
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<BindGroupLayoutBase> layout = AcquireRef(pipeline->APIGetBindGroupLayout(0));
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
// Prepare bind group descriptor
BindGroupEntry bindGroupEntries[3] = {};
@@ -280,8 +325,8 @@ namespace dawn_native {
BufferDescriptor uniformDesc = {};
uniformDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
uniformDesc.size = sizeof(uniformData);
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<BufferBase> uniformBuffer = AcquireRef(device->APICreateBuffer(&uniformDesc));
+ Ref<BufferBase> uniformBuffer;
+ DAWN_TRY_ASSIGN(uniformBuffer, device->CreateBuffer(&uniformDesc));
DAWN_TRY(device->GetQueue()->WriteBuffer(uniformBuffer.Get(), 0, uniformData,
sizeof(uniformData)));
@@ -289,16 +334,17 @@ namespace dawn_native {
// Prepare binding 1 resource: sampler
// Use default configuration, filterMode set to Nearest for min and mag.
SamplerDescriptor samplerDesc = {};
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<SamplerBase> sampler = AcquireRef(device->APICreateSampler(&samplerDesc));
+ Ref<SamplerBase> sampler;
+ DAWN_TRY_ASSIGN(sampler, device->CreateSampler(&samplerDesc));
// Prepare binding 2 resource: sampled texture
TextureViewDescriptor srcTextureViewDesc = {};
srcTextureViewDesc.baseMipLevel = source->mipLevel;
srcTextureViewDesc.mipLevelCount = 1;
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<TextureViewBase> srcTextureView =
- AcquireRef(source->texture->APICreateView(&srcTextureViewDesc));
+ srcTextureViewDesc.arrayLayerCount = 1;
+ Ref<TextureViewBase> srcTextureView;
+ DAWN_TRY_ASSIGN(srcTextureView,
+ device->CreateTextureView(source->texture, &srcTextureViewDesc));
// Set bind group entries.
bindGroupEntries[0].binding = 0;
@@ -310,8 +356,8 @@ namespace dawn_native {
bindGroupEntries[2].textureView = srcTextureView.Get();
// Create bind group after all binding entries are set.
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<BindGroupBase> bindGroup = AcquireRef(device->APICreateBindGroup(&bgDesc));
+ Ref<BindGroupBase> bindGroup;
+ DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bgDesc));
// Create command encoder.
CommandEncoderDescriptor encoderDesc = {};
@@ -322,14 +368,16 @@ namespace dawn_native {
TextureViewDescriptor dstTextureViewDesc;
dstTextureViewDesc.baseMipLevel = destination->mipLevel;
dstTextureViewDesc.mipLevelCount = 1;
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<TextureViewBase> dstView =
- AcquireRef(destination->texture->APICreateView(&dstTextureViewDesc));
+ dstTextureViewDesc.baseArrayLayer = destination->origin.z;
+ dstTextureViewDesc.arrayLayerCount = 1;
+ Ref<TextureViewBase> dstView;
+ DAWN_TRY_ASSIGN(dstView,
+ device->CreateTextureView(destination->texture, &dstTextureViewDesc));
// Prepare render pass color attachment descriptor.
RenderPassColorAttachmentDescriptor colorAttachmentDesc;
- colorAttachmentDesc.attachment = dstView.Get();
+ colorAttachmentDesc.view = dstView.Get();
colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
colorAttachmentDesc.storeOp = wgpu::StoreOp::Store;
colorAttachmentDesc.clearColor = {0.0, 0.0, 0.0, 1.0};
diff --git a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.cpp b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp
index a9fcf620a1f..b6a32b12e75 100644
--- a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn_native/CreatePipelineAsyncTracker.h"
+#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/ComputePipeline.h"
#include "dawn_native/Device.h"
@@ -20,25 +20,23 @@
namespace dawn_native {
- CreatePipelineAsyncTaskBase::CreatePipelineAsyncTaskBase(std::string errorMessage,
- void* userdata)
+ CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(
+ std::string errorMessage,
+ void* userdata)
: mErrorMessage(errorMessage), mUserData(userdata) {
}
- CreatePipelineAsyncTaskBase::~CreatePipelineAsyncTaskBase() {
- }
-
- CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
+ CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
Ref<ComputePipelineBase> pipeline,
std::string errorMessage,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata)
- : CreatePipelineAsyncTaskBase(errorMessage, userdata),
+ : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
mPipeline(std::move(pipeline)),
mCreateComputePipelineAsyncCallback(callback) {
}
- void CreateComputePipelineAsyncTask::Finish() {
+ void CreateComputePipelineAsyncCallbackTask::Finish() {
ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
if (mPipeline.Get() != nullptr) {
@@ -51,31 +49,31 @@ namespace dawn_native {
}
}
- void CreateComputePipelineAsyncTask::HandleShutDown() {
+ void CreateComputePipelineAsyncCallbackTask::HandleShutDown() {
ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
"Device destroyed before callback", mUserData);
}
- void CreateComputePipelineAsyncTask::HandleDeviceLoss() {
+ void CreateComputePipelineAsyncCallbackTask::HandleDeviceLoss() {
ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
"Device lost before callback", mUserData);
}
- CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
+ CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask(
Ref<RenderPipelineBase> pipeline,
std::string errorMessage,
WGPUCreateRenderPipelineAsyncCallback callback,
void* userdata)
- : CreatePipelineAsyncTaskBase(errorMessage, userdata),
+ : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
mPipeline(std::move(pipeline)),
mCreateRenderPipelineAsyncCallback(callback) {
}
- void CreateRenderPipelineAsyncTask::Finish() {
+ void CreateRenderPipelineAsyncCallbackTask::Finish() {
ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
if (mPipeline.Get() != nullptr) {
@@ -88,59 +86,18 @@ namespace dawn_native {
}
}
- void CreateRenderPipelineAsyncTask::HandleShutDown() {
+ void CreateRenderPipelineAsyncCallbackTask::HandleShutDown() {
ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
"Device destroyed before callback", mUserData);
}
- void CreateRenderPipelineAsyncTask::HandleDeviceLoss() {
+ void CreateRenderPipelineAsyncCallbackTask::HandleDeviceLoss() {
ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
"Device lost before callback", mUserData);
}
- CreatePipelineAsyncTracker::CreatePipelineAsyncTracker(DeviceBase* device) : mDevice(device) {
- }
-
- CreatePipelineAsyncTracker::~CreatePipelineAsyncTracker() {
- ASSERT(mCreatePipelineAsyncTasksInFlight.Empty());
- }
-
- void CreatePipelineAsyncTracker::TrackTask(std::unique_ptr<CreatePipelineAsyncTaskBase> task,
- ExecutionSerial serial) {
- mCreatePipelineAsyncTasksInFlight.Enqueue(std::move(task), serial);
- mDevice->AddFutureSerial(serial);
- }
-
- void CreatePipelineAsyncTracker::Tick(ExecutionSerial finishedSerial) {
- // If a user calls Queue::Submit inside Create*PipelineAsync, then the device will be
- // ticked, which in turns ticks the tracker, causing reentrance here. To prevent the
- // reentrant call from invalidating mCreatePipelineAsyncTasksInFlight while in use by the
- // first call, we remove the tasks to finish from the queue, update
- // mCreatePipelineAsyncTasksInFlight, then run the callbacks.
- std::vector<std::unique_ptr<CreatePipelineAsyncTaskBase>> tasks;
- for (auto& task : mCreatePipelineAsyncTasksInFlight.IterateUpTo(finishedSerial)) {
- tasks.push_back(std::move(task));
- }
- mCreatePipelineAsyncTasksInFlight.ClearUpTo(finishedSerial);
-
- for (auto& task : tasks) {
- if (mDevice->IsLost()) {
- task->HandleDeviceLoss();
- } else {
- task->Finish();
- }
- }
- }
-
- void CreatePipelineAsyncTracker::ClearForShutDown() {
- for (auto& task : mCreatePipelineAsyncTasksInFlight.IterateAll()) {
- task->HandleShutDown();
- }
- mCreatePipelineAsyncTasksInFlight.Clear();
- }
-
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h
new file mode 100644
index 00000000000..9cddfa2e345
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h
@@ -0,0 +1,68 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
+#define DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
+
+#include "common/RefCounted.h"
+#include "dawn/webgpu.h"
+#include "dawn_native/CallbackTaskManager.h"
+
+namespace dawn_native {
+
+ class ComputePipelineBase;
+ class DeviceBase;
+ class RenderPipelineBase;
+
+ struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
+ CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
+
+ protected:
+ std::string mErrorMessage;
+ void* mUserData;
+ };
+
+ struct CreateComputePipelineAsyncCallbackTask final : CreatePipelineAsyncCallbackTaskBase {
+ CreateComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+
+ void Finish() final;
+ void HandleShutDown() final;
+ void HandleDeviceLoss() final;
+
+ private:
+ Ref<ComputePipelineBase> mPipeline;
+ WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
+ };
+
+ struct CreateRenderPipelineAsyncCallbackTask final : CreatePipelineAsyncCallbackTaskBase {
+ CreateRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ void Finish() final;
+ void HandleShutDown() final;
+ void HandleDeviceLoss() final;
+
+ private:
+ Ref<RenderPipelineBase> mPipeline;
+ WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
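
The new CreatePipelineAsync*CallbackTask types are just CallbackTask implementations; the device decides which of the three completion paths to run when it drains the CallbackTaskManager. A hedged sketch of what implementing that interface looks like (the stand-in base class below mirrors the shape used in this patch; the real declaration lives in CallbackTaskManager.h):

    #include <cstdio>

    // Stand-in for dawn_native::CallbackTask, shown only to illustrate the
    // three completion paths a task must handle.
    struct CallbackTask {
        virtual ~CallbackTask() = default;
        virtual void Finish() = 0;           // normal completion
        virtual void HandleShutDown() = 0;   // device destroyed before the callback ran
        virtual void HandleDeviceLoss() = 0; // device lost before the callback ran
    };

    struct PrintResultTask final : CallbackTask {
        void Finish() override           { std::printf("pipeline ready\n"); }
        void HandleShutDown() override   { std::printf("device destroyed before callback\n"); }
        void HandleDeviceLoss() override { std::printf("device lost before callback\n"); }
    };
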
diff --git a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.h b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.h
deleted file mode 100644
index b84daed2e6c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_CREATEPIPELINEASYNCTRACKER_H_
-#define DAWNNATIVE_CREATEPIPELINEASYNCTRACKER_H_
-
-#include "common/RefCounted.h"
-#include "common/SerialQueue.h"
-#include "dawn/webgpu.h"
-#include "dawn_native/IntegerTypes.h"
-
-#include <memory>
-#include <string>
-
-namespace dawn_native {
-
- class ComputePipelineBase;
- class DeviceBase;
- class RenderPipelineBase;
-
- struct CreatePipelineAsyncTaskBase {
- CreatePipelineAsyncTaskBase(std::string errorMessage, void* userData);
- virtual ~CreatePipelineAsyncTaskBase();
-
- virtual void Finish() = 0;
- virtual void HandleShutDown() = 0;
- virtual void HandleDeviceLoss() = 0;
-
- protected:
- std::string mErrorMessage;
- void* mUserData;
- };
-
- struct CreateComputePipelineAsyncTask final : public CreatePipelineAsyncTaskBase {
- CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
-
- void Finish() final;
- void HandleShutDown() final;
- void HandleDeviceLoss() final;
-
- private:
- Ref<ComputePipelineBase> mPipeline;
- WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
- };
-
- struct CreateRenderPipelineAsyncTask final : public CreatePipelineAsyncTaskBase {
- CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- void Finish() final;
- void HandleShutDown() final;
- void HandleDeviceLoss() final;
-
- private:
- Ref<RenderPipelineBase> mPipeline;
- WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
- };
-
- class CreatePipelineAsyncTracker {
- public:
- explicit CreatePipelineAsyncTracker(DeviceBase* device);
- ~CreatePipelineAsyncTracker();
-
- void TrackTask(std::unique_ptr<CreatePipelineAsyncTaskBase> task, ExecutionSerial serial);
- void Tick(ExecutionSerial finishedSerial);
- void ClearForShutDown();
-
- private:
- DeviceBase* mDevice;
- SerialQueue<ExecutionSerial, std::unique_ptr<CreatePipelineAsyncTaskBase>>
- mCreatePipelineAsyncTasksInFlight;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_CREATEPIPELINEASYNCTRACKER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.cpp b/chromium/third_party/dawn/src/dawn_native/Device.cpp
index 91c905f5f7e..713688eb253 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Device.cpp
@@ -20,11 +20,12 @@
#include "dawn_native/BindGroup.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Buffer.h"
+#include "dawn_native/CallbackTaskManager.h"
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/CompilationMessages.h"
#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/CreatePipelineAsyncTracker.h"
+#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/ErrorScope.h"
@@ -125,7 +126,7 @@ namespace dawn_native {
mCaches = std::make_unique<DeviceBase::Caches>();
mErrorScopeStack = std::make_unique<ErrorScopeStack>();
mDynamicUploader = std::make_unique<DynamicUploader>(this);
- mCreatePipelineAsyncTracker = std::make_unique<CreatePipelineAsyncTracker>(this);
+ mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
mInternalPipelineStore = std::make_unique<InternalPipelineStore>();
mPersistentCache = std::make_unique<PersistentCache>(this);
@@ -142,8 +143,11 @@ namespace dawn_native {
void DeviceBase::ShutDownBase() {
// Skip handling device facilities if they haven't even been created (or failed doing so)
if (mState != State::BeingCreated) {
- // Reject all async pipeline creations.
- mCreatePipelineAsyncTracker->ClearForShutDown();
+ // Call all the callbacks immediately as the device is about to shut down.
+ auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+ for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+ callbackTask->HandleShutDown();
+ }
}
// Disconnect the device, depending on which state we are currently in.
@@ -188,7 +192,7 @@ namespace dawn_native {
mState = State::Disconnected;
mDynamicUploader = nullptr;
- mCreatePipelineAsyncTracker = nullptr;
+ mCallbackTaskManager = nullptr;
mPersistentCache = nullptr;
mEmptyBindGroupLayout = nullptr;
@@ -238,6 +242,10 @@ namespace dawn_native {
}
mQueue->HandleDeviceLoss();
+ auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+ for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+ callbackTask->HandleDeviceLoss();
+ }
// Still forward device loss errors to the error scopes so they all reject.
mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
@@ -683,7 +691,7 @@ namespace dawn_native {
BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
Ref<BindGroupBase> result;
- if (ConsumedError(CreateBindGroupInternal(descriptor), &result)) {
+ if (ConsumedError(CreateBindGroup(descriptor), &result)) {
return BindGroupBase::MakeError(this);
}
return result.Detach();
@@ -691,14 +699,14 @@ namespace dawn_native {
BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
const BindGroupLayoutDescriptor* descriptor) {
Ref<BindGroupLayoutBase> result;
- if (ConsumedError(CreateBindGroupLayoutInternal(descriptor), &result)) {
+ if (ConsumedError(CreateBindGroupLayout(descriptor), &result)) {
return BindGroupLayoutBase::MakeError(this);
}
return result.Detach();
}
BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
Ref<BufferBase> result = nullptr;
- if (ConsumedError(CreateBufferInternal(descriptor), &result)) {
+ if (ConsumedError(CreateBuffer(descriptor), &result)) {
ASSERT(result == nullptr);
return BufferBase::MakeError(this, descriptor);
}
@@ -711,7 +719,7 @@ namespace dawn_native {
ComputePipelineBase* DeviceBase::APICreateComputePipeline(
const ComputePipelineDescriptor* descriptor) {
Ref<ComputePipelineBase> result;
- if (ConsumedError(CreateComputePipelineInternal(descriptor), &result)) {
+ if (ConsumedError(CreateComputePipeline(descriptor), &result)) {
return ComputePipelineBase::MakeError(this);
}
return result.Detach();
@@ -719,10 +727,10 @@ namespace dawn_native {
void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata) {
- MaybeError maybeResult = CreateComputePipelineAsyncInternal(descriptor, callback, userdata);
+ MaybeError maybeResult = CreateComputePipelineAsync(descriptor, callback, userdata);
// Call the callback directly when a validation error has been found in the front-end
- // validations. If there is no error, then CreateComputePipelineAsyncInternal will call the
+ // validations. If there is no error, then CreateComputePipelineAsync will call the
// callback.
if (maybeResult.IsError()) {
std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
@@ -733,21 +741,21 @@ namespace dawn_native {
PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor) {
Ref<PipelineLayoutBase> result;
- if (ConsumedError(CreatePipelineLayoutInternal(descriptor), &result)) {
+ if (ConsumedError(CreatePipelineLayout(descriptor), &result)) {
return PipelineLayoutBase::MakeError(this);
}
return result.Detach();
}
QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
Ref<QuerySetBase> result;
- if (ConsumedError(CreateQuerySetInternal(descriptor), &result)) {
+ if (ConsumedError(CreateQuerySet(descriptor), &result)) {
return QuerySetBase::MakeError(this);
}
return result.Detach();
}
SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
Ref<SamplerBase> result;
- if (ConsumedError(CreateSamplerInternal(descriptor), &result)) {
+ if (ConsumedError(CreateSampler(descriptor), &result)) {
return SamplerBase::MakeError(this);
}
return result.Detach();
@@ -756,7 +764,7 @@ namespace dawn_native {
WGPUCreateRenderPipelineAsyncCallback callback,
void* userdata) {
ResultOrError<Ref<RenderPipelineBase>> maybeResult =
- CreateRenderPipelineInternal(descriptor);
+ CreateRenderPipeline(descriptor);
if (maybeResult.IsError()) {
std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
@@ -765,15 +773,15 @@ namespace dawn_native {
}
Ref<RenderPipelineBase> result = maybeResult.AcquireSuccess();
- std::unique_ptr<CreateRenderPipelineAsyncTask> request =
- std::make_unique<CreateRenderPipelineAsyncTask>(std::move(result), "", callback,
- userdata);
- mCreatePipelineAsyncTracker->TrackTask(std::move(request), GetPendingCommandSerial());
+ std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
+ std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), "", callback,
+ userdata);
+ mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
}
RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
const RenderBundleEncoderDescriptor* descriptor) {
Ref<RenderBundleEncoder> result;
- if (ConsumedError(CreateRenderBundleEncoderInternal(descriptor), &result)) {
+ if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result)) {
return RenderBundleEncoder::MakeError(this);
}
return result.Detach();
@@ -873,7 +881,7 @@ namespace dawn_native {
}
Ref<RenderPipelineBase> result;
- if (ConsumedError(CreateRenderPipelineInternal(&normalizedDescriptor), &result)) {
+ if (ConsumedError(CreateRenderPipeline(&normalizedDescriptor), &result)) {
return RenderPipelineBase::MakeError(this);
}
return result.Detach();
@@ -881,7 +889,7 @@ namespace dawn_native {
RenderPipelineBase* DeviceBase::APICreateRenderPipeline2(
const RenderPipelineDescriptor2* descriptor) {
Ref<RenderPipelineBase> result;
- if (ConsumedError(CreateRenderPipelineInternal(descriptor), &result)) {
+ if (ConsumedError(CreateRenderPipeline(descriptor), &result)) {
return RenderPipelineBase::MakeError(this);
}
return result.Detach();
@@ -889,7 +897,7 @@ namespace dawn_native {
ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
Ref<ShaderModuleBase> result;
ShaderModuleParseResult parseResult = {};
- if (ConsumedError(CreateShaderModuleInternal(descriptor, &parseResult), &result)) {
+ if (ConsumedError(CreateShaderModule(descriptor, &parseResult), &result)) {
return ShaderModuleBase::MakeError(this, std::move(parseResult.compilationMessages));
}
return result.Detach();
@@ -897,26 +905,18 @@ namespace dawn_native {
SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
const SwapChainDescriptor* descriptor) {
Ref<SwapChainBase> result;
- if (ConsumedError(CreateSwapChainInternal(surface, descriptor), &result)) {
+ if (ConsumedError(CreateSwapChain(surface, descriptor), &result)) {
return SwapChainBase::MakeError(this);
}
return result.Detach();
}
TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
Ref<TextureBase> result;
- if (ConsumedError(CreateTextureInternal(descriptor), &result)) {
+ if (ConsumedError(CreateTexture(descriptor), &result)) {
return TextureBase::MakeError(this);
}
return result.Detach();
}
- TextureViewBase* DeviceBase::CreateTextureView(TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- Ref<TextureViewBase> result;
- if (ConsumedError(CreateTextureViewInternal(texture, descriptor), &result)) {
- return TextureViewBase::MakeError(this);
- }
- return result.Detach();
- }
// For Dawn Wire
@@ -958,8 +958,19 @@ namespace dawn_native {
// reclaiming resources one tick earlier.
mDynamicUploader->Deallocate(mCompletedSerial);
mQueue->Tick(mCompletedSerial);
+ }
- mCreatePipelineAsyncTracker->Tick(mCompletedSerial);
+ // We have to check mCallbackTaskManager in every Tick because it is not related to any
+ // global serials.
+ if (!mCallbackTaskManager->IsEmpty()) {
+ // If a user calls Queue::Submit inside the callback, then the device will be ticked,
+ // which in turn ticks this manager again, causing reentrance and deadlock here. To
+ // prevent such a reentrant call, we first move all the callback tasks out of
+ // mCallbackTaskManager and only then run the callbacks.
+ auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+ for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+ callbackTask->Finish();
+ }
}
return {};
@@ -974,16 +985,10 @@ namespace dawn_native {
return mQueue.Get();
}
- QueueBase* DeviceBase::APIGetDefaultQueue() {
- EmitDeprecationWarning(
- "Device::GetDefaultQueue is deprecated, use Device::GetQueue() instead");
- return APIGetQueue();
- }
-
ExternalTextureBase* DeviceBase::APICreateExternalTexture(
const ExternalTextureDescriptor* descriptor) {
Ref<ExternalTextureBase> result = nullptr;
- if (ConsumedError(CreateExternalTextureInternal(descriptor), &result)) {
+ if (ConsumedError(CreateExternalTexture(descriptor), &result)) {
return ExternalTextureBase::MakeError(this);
}
@@ -1039,7 +1044,7 @@ namespace dawn_native {
// Implementation details of object creation
- ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroupInternal(
+ ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroup(
const BindGroupDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
@@ -1048,7 +1053,7 @@ namespace dawn_native {
return CreateBindGroupImpl(descriptor);
}
- ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayoutInternal(
+ ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayout(
const BindGroupLayoutDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
@@ -1057,8 +1062,7 @@ namespace dawn_native {
return GetOrCreateBindGroupLayout(descriptor);
}
- ResultOrError<Ref<BufferBase>> DeviceBase::CreateBufferInternal(
- const BufferDescriptor* descriptor) {
+ ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateBufferDescriptor(this, descriptor));
@@ -1074,7 +1078,7 @@ namespace dawn_native {
return std::move(buffer);
}
- ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipelineInternal(
+ ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipeline(
const ComputePipelineDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
@@ -1099,7 +1103,7 @@ namespace dawn_native {
return AddOrGetCachedPipeline(backendObj, blueprintHash);
}
- MaybeError DeviceBase::CreateComputePipelineAsyncInternal(
+ MaybeError DeviceBase::CreateComputePipelineAsync(
const ComputePipelineDescriptor* descriptor,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata) {
@@ -1166,13 +1170,13 @@ namespace dawn_native {
result = AddOrGetCachedPipeline(resultOrError.AcquireSuccess(), blueprintHash);
}
- std::unique_ptr<CreateComputePipelineAsyncTask> request =
- std::make_unique<CreateComputePipelineAsyncTask>(result, errorMessage, callback,
- userdata);
- mCreatePipelineAsyncTracker->TrackTask(std::move(request), GetPendingCommandSerial());
+ std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
+ std::make_unique<CreateComputePipelineAsyncCallbackTask>(
+ std::move(result), errorMessage, callback, userdata);
+ mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
}
- ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayoutInternal(
+ ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
@@ -1181,7 +1185,7 @@ namespace dawn_native {
return GetOrCreatePipelineLayout(descriptor);
}
- ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTextureInternal(
+ ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTexture(
const ExternalTextureDescriptor* descriptor) {
if (IsValidationEnabled()) {
DAWN_TRY(ValidateExternalTextureDescriptor(this, descriptor));
@@ -1190,7 +1194,7 @@ namespace dawn_native {
return ExternalTextureBase::Create(this, descriptor);
}
- ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySetInternal(
+ ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(
const QuerySetDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
@@ -1199,7 +1203,7 @@ namespace dawn_native {
return CreateQuerySetImpl(descriptor);
}
- ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoderInternal(
+ ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoder(
const RenderBundleEncoderDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
@@ -1208,7 +1212,7 @@ namespace dawn_native {
return RenderBundleEncoder::Create(this, descriptor);
}
- ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipelineInternal(
+ ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipeline(
const RenderPipelineDescriptor2* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
@@ -1231,7 +1235,7 @@ namespace dawn_native {
}
}
- ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSamplerInternal(
+ ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(
const SamplerDescriptor* descriptor) {
const SamplerDescriptor defaultDescriptor = {};
DAWN_TRY(ValidateIsAlive());
@@ -1242,11 +1246,19 @@ namespace dawn_native {
return GetOrCreateSampler(descriptor);
}
- ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModuleInternal(
+ ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModule(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) {
DAWN_TRY(ValidateIsAlive());
+ // CreateShaderModule can be called from inside dawn_native. In that case, handle the
+ // error directly in Dawn; the parse results are not needed since there should be no
+ // validation errors.
+ ShaderModuleParseResult ignoredResults;
+ if (parseResult == nullptr) {
+ parseResult = &ignoredResults;
+ }
+
if (IsValidationEnabled()) {
DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult));
}
@@ -1254,7 +1266,7 @@ namespace dawn_native {
return GetOrCreateShaderModule(descriptor, parseResult);
}
- ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChainInternal(
+ ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChain(
Surface* surface,
const SwapChainDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
@@ -1285,25 +1297,22 @@ namespace dawn_native {
}
}
- ResultOrError<Ref<TextureBase>> DeviceBase::CreateTextureInternal(
- const TextureDescriptor* descriptor) {
+ ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
- TextureDescriptor fixedDescriptor = *descriptor;
- DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(this, &(fixedDescriptor.size)));
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateTextureDescriptor(this, &fixedDescriptor));
+ DAWN_TRY(ValidateTextureDescriptor(this, descriptor));
}
- return CreateTextureImpl(&fixedDescriptor);
+ return CreateTextureImpl(descriptor);
}
- ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureViewInternal(
+ ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureView(
TextureBase* texture,
const TextureViewDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
DAWN_TRY(ValidateObject(texture));
TextureViewDescriptor desc = GetTextureViewDescriptorWithDefaults(texture, descriptor);
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateTextureViewDescriptor(texture, &desc));
+ DAWN_TRY(ValidateTextureViewDescriptor(this, texture, &desc));
}
return CreateTextureViewImpl(texture, &desc);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.h b/chromium/third_party/dawn/src/dawn_native/Device.h
index c5e7ad81d10..600c2c1109f 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.h
+++ b/chromium/third_party/dawn/src/dawn_native/Device.h
@@ -34,7 +34,7 @@ namespace dawn_native {
class AttachmentState;
class AttachmentStateBlueprint;
class BindGroupLayoutBase;
- class CreatePipelineAsyncTracker;
+ class CallbackTaskManager;
class DynamicUploader;
class ErrorScopeStack;
class ExternalTextureBase;
@@ -140,7 +140,38 @@ namespace dawn_native {
Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
void UncacheAttachmentState(AttachmentState* obj);
- // Dawn API
+ // Object creation methods that can be used in a reentrant manner.
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroup(const BindGroupDescriptor* descriptor);
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayout(
+ const BindGroupLayoutDescriptor* descriptor);
+ ResultOrError<Ref<BufferBase>> CreateBuffer(const BufferDescriptor* descriptor);
+ ResultOrError<Ref<ComputePipelineBase>> CreateComputePipeline(
+ const ComputePipelineDescriptor* descriptor);
+ MaybeError CreateComputePipelineAsync(
+ const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ ResultOrError<Ref<ExternalTextureBase>> CreateExternalTexture(
+ const ExternalTextureDescriptor* descriptor);
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayout(
+ const PipelineLayoutDescriptor* descriptor);
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySet(const QuerySetDescriptor* descriptor);
+ ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoder(
+ const RenderBundleEncoderDescriptor* descriptor);
+ ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipeline(
+ const RenderPipelineDescriptor2* descriptor);
+ ResultOrError<Ref<SamplerBase>> CreateSampler(const SamplerDescriptor* descriptor);
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult = nullptr);
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
+ const SwapChainDescriptor* descriptor);
+ ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
+ ResultOrError<Ref<TextureViewBase>> CreateTextureView(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
+
+ // Implementation of API object creation methods. DO NOT use them in a reentrant manner.
BindGroupBase* APICreateBindGroup(const BindGroupDescriptor* descriptor);
BindGroupLayoutBase* APICreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
BufferBase* APICreateBuffer(const BufferDescriptor* descriptor);
@@ -163,15 +194,12 @@ namespace dawn_native {
ShaderModuleBase* APICreateShaderModule(const ShaderModuleDescriptor* descriptor);
SwapChainBase* APICreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
TextureBase* APICreateTexture(const TextureDescriptor* descriptor);
- TextureViewBase* CreateTextureView(TextureBase* texture,
- const TextureViewDescriptor* descriptor);
+
InternalPipelineStore* GetInternalPipelineStore();
// For Dawn Wire
BufferBase* APICreateErrorBuffer();
- // TODO(dawn:22): Remove once the deprecation period is finished.
- QueueBase* APIGetDefaultQueue();
QueueBase* APIGetQueue();
void APIInjectError(wgpu::ErrorType type, const char* message);
@@ -299,39 +327,6 @@ namespace dawn_native {
ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupInternal(
- const BindGroupDescriptor* descriptor);
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutInternal(
- const BindGroupLayoutDescriptor* descriptor);
- ResultOrError<Ref<BufferBase>> CreateBufferInternal(const BufferDescriptor* descriptor);
- MaybeError CreateComputePipelineAsyncInternal(
- const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineInternal(
- const ComputePipelineDescriptor* descriptor);
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutInternal(
- const PipelineLayoutDescriptor* descriptor);
- ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureInternal(
- const ExternalTextureDescriptor* descriptor);
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetInternal(
- const QuerySetDescriptor* descriptor);
- ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoderInternal(
- const RenderBundleEncoderDescriptor* descriptor);
- ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineInternal(
- const RenderPipelineDescriptor2* descriptor);
- ResultOrError<Ref<SamplerBase>> CreateSamplerInternal(const SamplerDescriptor* descriptor);
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleInternal(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainInternal(
- Surface* surface,
- const SwapChainDescriptor* descriptor);
- ResultOrError<Ref<TextureBase>> CreateTextureInternal(const TextureDescriptor* descriptor);
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewInternal(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor);
-
ResultOrError<Ref<PipelineLayoutBase>> ValidateAndGetComputePipelineDescriptorWithDefaults(
const ComputePipelineDescriptor& descriptor,
ComputePipelineDescriptor* outDescriptor);
@@ -405,7 +400,7 @@ namespace dawn_native {
Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
std::unique_ptr<DynamicUploader> mDynamicUploader;
- std::unique_ptr<CreatePipelineAsyncTracker> mCreatePipelineAsyncTracker;
+ std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
Ref<QueueBase> mQueue;
struct DeprecationWarnings;
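To illustrate the split between the reentrant Create* methods and the APICreate* entry points, here is a minimal sketch of the intended internal usage (illustrative only, not part of this change; the helper name is hypothetical and the includes are approximate):

    #include "dawn_native/Device.h"
    #include "dawn_native/ShaderModule.h"

    namespace dawn_native {
        // Internal code propagates creation failures with DAWN_TRY_ASSIGN instead of going
        // through APICreateShaderModule, which reports errors on the device instead.
        MaybeError CreateInternalShaderModule(DeviceBase* device,
                                              const char* wgslSource,
                                              Ref<ShaderModuleBase>* outModule) {
            ShaderModuleWGSLDescriptor wgslDesc = {};
            wgslDesc.source = wgslSource;
            ShaderModuleDescriptor descriptor = {};
            descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
            // Omitting the parseResult argument lets Device.cpp substitute an ignored result,
            // as added in the hunk above.
            DAWN_TRY_ASSIGN(*outModule, device->CreateShaderModule(&descriptor));
            return {};
        }
    }  // namespace dawn_native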
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp b/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
index 39b5d4c7bf1..fe347db9e51 100644
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
@@ -53,18 +53,17 @@ namespace dawn_native {
}
}
- void EncodingContext::HandleError(InternalErrorType type, const char* message) {
+ void EncodingContext::HandleError(std::unique_ptr<ErrorData> error) {
if (!IsFinished()) {
// Encoding should only generate validation errors.
- ASSERT(type == InternalErrorType::Validation);
+ ASSERT(error->GetType() == InternalErrorType::Validation);
// If the encoding context is not finished, errors are deferred until
// Finish() is called.
- if (!mGotError) {
- mGotError = true;
- mErrorMessage = message;
+ if (mError == nullptr) {
+ mError = std::move(error);
}
} else {
- mDevice->HandleError(type, message);
+ mDevice->HandleError(error->GetType(), error->GetMessage().c_str());
}
}
@@ -76,25 +75,42 @@ namespace dawn_native {
mCurrentEncoder = passEncoder;
}
- void EncodingContext::ExitPass(const ObjectBase* passEncoder, PassResourceUsage passUsage) {
- // Assert we're not at the top level.
+ void EncodingContext::ExitPass(const ObjectBase* passEncoder, RenderPassResourceUsage usages) {
ASSERT(mCurrentEncoder != mTopLevelEncoder);
- // Assert the pass encoder is current.
ASSERT(mCurrentEncoder == passEncoder);
mCurrentEncoder = mTopLevelEncoder;
- mPassUsages.push_back(std::move(passUsage));
+ mRenderPassUsages.push_back(std::move(usages));
}
- const PerPassUsages& EncodingContext::GetPassUsages() const {
- ASSERT(!mWerePassUsagesAcquired);
- return mPassUsages;
+ void EncodingContext::ExitPass(const ObjectBase* passEncoder, ComputePassResourceUsage usages) {
+ ASSERT(mCurrentEncoder != mTopLevelEncoder);
+ ASSERT(mCurrentEncoder == passEncoder);
+
+ mCurrentEncoder = mTopLevelEncoder;
+ mComputePassUsages.push_back(std::move(usages));
+ }
+
+ const RenderPassUsages& EncodingContext::GetRenderPassUsages() const {
+ ASSERT(!mWereRenderPassUsagesAcquired);
+ return mRenderPassUsages;
+ }
+
+ RenderPassUsages EncodingContext::AcquireRenderPassUsages() {
+ ASSERT(!mWereRenderPassUsagesAcquired);
+ mWereRenderPassUsagesAcquired = true;
+ return std::move(mRenderPassUsages);
+ }
+
+ const ComputePassUsages& EncodingContext::GetComputePassUsages() const {
+ ASSERT(!mWereComputePassUsagesAcquired);
+ return mComputePassUsages;
}
- PerPassUsages EncodingContext::AcquirePassUsages() {
- ASSERT(!mWerePassUsagesAcquired);
- mWerePassUsagesAcquired = true;
- return std::move(mPassUsages);
+ ComputePassUsages EncodingContext::AcquireComputePassUsages() {
+ ASSERT(!mWereComputePassUsagesAcquired);
+ mWereComputePassUsagesAcquired = true;
+ return std::move(mComputePassUsages);
}
MaybeError EncodingContext::Finish() {
@@ -111,8 +127,8 @@ namespace dawn_native {
mCurrentEncoder = nullptr;
mTopLevelEncoder = nullptr;
- if (mGotError) {
- return DAWN_VALIDATION_ERROR(mErrorMessage);
+ if (mError != nullptr) {
+ return std::move(mError);
}
if (currentEncoder != topLevelEncoder) {
return DAWN_VALIDATION_ERROR("Command buffer recording ended mid-pass");
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h b/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
index e74819a4cb1..b97e317abb0 100644
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
+++ b/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
@@ -39,15 +39,11 @@ namespace dawn_native {
CommandIterator* GetIterator();
// Functions to handle encoder errors
- void HandleError(InternalErrorType type, const char* message);
-
- inline void ConsumeError(std::unique_ptr<ErrorData> error) {
- HandleError(error->GetType(), error->GetMessage().c_str());
- }
+ void HandleError(std::unique_ptr<ErrorData> error);
inline bool ConsumedError(MaybeError maybeError) {
if (DAWN_UNLIKELY(maybeError.IsError())) {
- ConsumeError(maybeError.AcquireError());
+ HandleError(maybeError.AcquireError());
return true;
}
return false;
@@ -57,11 +53,10 @@ namespace dawn_native {
if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
if (mCurrentEncoder != mTopLevelEncoder) {
// The top level encoder was used when a pass encoder was current.
- HandleError(InternalErrorType::Validation,
- "Command cannot be recorded inside a pass");
+ HandleError(DAWN_VALIDATION_ERROR("Command cannot be recorded inside a pass"));
} else {
- HandleError(InternalErrorType::Validation,
- "Recording in an error or already ended pass encoder");
+ HandleError(DAWN_VALIDATION_ERROR(
+ "Recording in an error or already ended pass encoder"));
}
return false;
}
@@ -79,11 +74,14 @@ namespace dawn_native {
// Functions to set current encoder state
void EnterPass(const ObjectBase* passEncoder);
- void ExitPass(const ObjectBase* passEncoder, PassResourceUsage passUsages);
+ void ExitPass(const ObjectBase* passEncoder, RenderPassResourceUsage usages);
+ void ExitPass(const ObjectBase* passEncoder, ComputePassResourceUsage usages);
MaybeError Finish();
- const PerPassUsages& GetPassUsages() const;
- PerPassUsages AcquirePassUsages();
+ const RenderPassUsages& GetRenderPassUsages() const;
+ const ComputePassUsages& GetComputePassUsages() const;
+ RenderPassUsages AcquireRenderPassUsages();
+ ComputePassUsages AcquireComputePassUsages();
private:
bool IsFinished() const;
@@ -101,16 +99,17 @@ namespace dawn_native {
// CommandEncoder::Begin/EndPass.
const ObjectBase* mCurrentEncoder;
- PerPassUsages mPassUsages;
- bool mWerePassUsagesAcquired = false;
+ RenderPassUsages mRenderPassUsages;
+ bool mWereRenderPassUsagesAcquired = false;
+ ComputePassUsages mComputePassUsages;
+ bool mWereComputePassUsagesAcquired = false;
CommandAllocator mAllocator;
CommandIterator mIterator;
bool mWasMovedToIterator = false;
bool mWereCommandsAcquired = false;
- bool mGotError = false;
- std::string mErrorMessage;
+ std::unique_ptr<ErrorData> mError;
};
} // namespace dawn_native
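With HandleError now taking a full ErrorData, a caller-side sketch looks like this (illustrative only, not part of this change; the function name is hypothetical):

    #include "dawn_native/EncodingContext.h"

    namespace dawn_native {
        // A failing MaybeError is moved into the EncodingContext. While encoding is still in
        // progress the first ErrorData is kept in mError and surfaced from Finish().
        bool RecordIfValid(EncodingContext* encodingContext, MaybeError validationResult) {
            if (encodingContext->ConsumedError(std::move(validationResult))) {
                return false;
            }
            return true;
        }
    }  // namespace dawn_native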
diff --git a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
index b65ba467d04..76d06c5f490 100644
--- a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
@@ -55,6 +55,8 @@ namespace dawn_native {
ASSERT(descriptor);
ASSERT(descriptor->plane0);
+ DAWN_TRY(device->ValidateObject(descriptor->plane0));
+
const Format* format;
DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.cpp b/chromium/third_party/dawn/src/dawn_native/Format.cpp
index 926c47f9e47..b5d8c845217 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Format.cpp
@@ -304,6 +304,7 @@ namespace dawn_native {
// because its size isn't well defined, is it 4, 5 or 8?
AddMultiAspectFormat(wgpu::TextureFormat::Depth24PlusStencil8,
Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Stencil8, true, true);
+ // TODO(dawn:690): Implement Depth16Unorm, Depth24UnormStencil8, Depth32FloatStencil8.
// BC compressed formats
bool isBCFormatSupported = device->IsExtensionEnabled(Extension::TextureCompressionBC);
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.cpp b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
index 7d65e4d185e..2b88f5a9b41 100644
--- a/chromium/third_party/dawn/src/dawn_native/Instance.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
@@ -150,10 +150,12 @@ namespace dawn_native {
Register(vulkan::Connect(this, true), wgpu::BackendType::Vulkan);
# endif // defined(DAWN_ENABLE_SWIFTSHADER)
#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
-#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
Register(opengl::Connect(this, wgpu::BackendType::OpenGL), wgpu::BackendType::OpenGL);
+#endif // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
Register(opengl::Connect(this, wgpu::BackendType::OpenGLES), wgpu::BackendType::OpenGLES);
-#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
+#endif // defined(DAWN_ENABLE_BACKEND_OPENGLES)
#if defined(DAWN_ENABLE_BACKEND_NULL)
Register(null::Connect(this), wgpu::BackendType::Null);
#endif // defined(DAWN_ENABLE_BACKEND_NULL)
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
index 772f8c2e344..3168c39a5b0 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
@@ -23,34 +23,70 @@
namespace dawn_native {
+ // This file declares various "ResourceUsage" structures. They are produced by the frontend
+ // while recording commands and are used for later validation, as well as for some operations
+ // in the backends. The "Encoder" objects finalize them on "EndPass" or "Finish"; internally
+ // the "Encoder" may use the "StateTracker" to create them.
+
class BufferBase;
class QuerySetBase;
class TextureBase;
- enum class PassType { Render, Compute };
-
// The texture usage inside passes must be tracked per-subresource.
- using PassTextureUsage = SubresourceStorage<wgpu::TextureUsage>;
+ using TextureSubresourceUsage = SubresourceStorage<wgpu::TextureUsage>;
- // Which resources are used by pass and how they are used. The command buffer validation
- // pre-computes this information so that backends with explicit barriers don't have to
- // re-compute it.
- struct PassResourceUsage {
- PassType passType;
+ // Which resources are used by a synchronization scope and how they are used. The command
+ // buffer validation pre-computes this information so that backends with explicit barriers
+ // don't have to re-compute it.
+ struct SyncScopeResourceUsage {
std::vector<BufferBase*> buffers;
std::vector<wgpu::BufferUsage> bufferUsages;
std::vector<TextureBase*> textures;
- std::vector<PassTextureUsage> textureUsages;
+ std::vector<TextureSubresourceUsage> textureUsages;
+ };
+
+ // Contains all the resource usage data for a compute pass.
+ //
+ // Essentially a list of SyncScopeResourceUsage, one per Dispatch as required by the WebGPU
+ // specification. ComputePassResourceUsage also stores inline the set of all buffers and
+ // textures used, because BindGroups that are set but never used by a dispatch do not appear
+ // in any synchronization scope, yet their resources still need to be validated on
+ // Queue::Submit.
+ struct ComputePassResourceUsage {
+ // Somehow, without this defaulted move constructor, MSVC or its STL uses the (deleted)
+ // copy constructor when doing operations on a std::vector<ComputePassResourceUsage>.
+ ComputePassResourceUsage(ComputePassResourceUsage&&) = default;
+ ComputePassResourceUsage() = default;
+ std::vector<SyncScopeResourceUsage> dispatchUsages;
+
+ // All the resources referenced by this compute pass for validation in Queue::Submit.
+ std::set<BufferBase*> referencedBuffers;
+ std::set<TextureBase*> referencedTextures;
+ };
+
+ // Contains all the resource usage data for a render pass.
+ //
+ // In the WebGPU specification render passes are synchronization scopes, but we also need to
+ // track additional data. This usage is stored for render passes used by a CommandBuffer, and
+ // also in RenderBundle so it can be merged into the render pass's usage on ExecuteBundles().
+ struct RenderPassResourceUsage : public SyncScopeResourceUsage {
+ // Storage to track the occlusion queries used during the pass.
std::vector<QuerySetBase*> querySets;
std::vector<std::vector<bool>> queryAvailabilities;
};
- using PerPassUsages = std::vector<PassResourceUsage>;
+ using RenderPassUsages = std::vector<RenderPassResourceUsage>;
+ using ComputePassUsages = std::vector<ComputePassResourceUsage>;
+ // Contains a hierarchy of "ResourceUsage" that mirrors the hierarchy of the CommandBuffer and
+ // is used for validation and to produce barriers and lazy clears in the backends.
struct CommandBufferResourceUsage {
- PerPassUsages perPass;
+ RenderPassUsages renderPasses;
+ ComputePassUsages computePasses;
+
+ // Resources used in commands that aren't in a pass.
std::set<BufferBase*> topLevelBuffers;
std::set<TextureBase*> topLevelTextures;
std::set<QuerySetBase*> usedQuerySets;
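A rough sketch of how a backend with explicit barriers might walk this hierarchy (illustrative only, not part of this change; the function is hypothetical and the includes are approximate):

    #include "common/Assert.h"
    #include "dawn_native/PassResourceUsage.h"

    namespace dawn_native {
        void TransitionForCommandBuffer(const CommandBufferResourceUsage& usages) {
            for (const RenderPassResourceUsage& pass : usages.renderPasses) {
                // A render pass is a single synchronization scope: buffers[i] is used with
                // bufferUsages[i], and textures[i] with the per-subresource textureUsages[i].
                ASSERT(pass.buffers.size() == pass.bufferUsages.size());
                ASSERT(pass.textures.size() == pass.textureUsages.size());
                // ... record barriers for the whole pass here ...
            }
            for (const ComputePassResourceUsage& pass : usages.computePasses) {
                // A compute pass carries one SyncScopeResourceUsage per dispatch, so barriers
                // can be emitted between dispatches.
                for (const SyncScopeResourceUsage& scope : pass.dispatchUsages) {
                    ASSERT(scope.buffers.size() == scope.bufferUsages.size());
                    // ... record barriers for this dispatch's scope here ...
                }
            }
        }
    }  // namespace dawn_native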
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
index 81d72edf408..75db4563414 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/PassResourceUsageTracker.h"
+#include "dawn_native/BindGroup.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/Format.h"
@@ -23,27 +24,24 @@
#include <utility>
namespace dawn_native {
- PassResourceUsageTracker::PassResourceUsageTracker(PassType passType) : mPassType(passType) {
- }
- void PassResourceUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
+ void SyncScopeUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
// std::map's operator[] will create the key and return 0 if the key didn't exist
// before.
mBufferUsages[buffer] |= usage;
}
- void PassResourceUsageTracker::TextureViewUsedAs(TextureViewBase* view,
- wgpu::TextureUsage usage) {
+ void SyncScopeUsageTracker::TextureViewUsedAs(TextureViewBase* view, wgpu::TextureUsage usage) {
TextureBase* texture = view->GetTexture();
const SubresourceRange& range = view->GetSubresourceRange();
- // Get or create a new PassTextureUsage for that texture (initially filled with
+ // Get or create a new TextureSubresourceUsage for that texture (initially filled with
// wgpu::TextureUsage::None)
auto it = mTextureUsages.emplace(
std::piecewise_construct, std::forward_as_tuple(texture),
std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
texture->GetNumMipLevels(), wgpu::TextureUsage::None));
- PassTextureUsage& textureUsage = it.first->second;
+ TextureSubresourceUsage& textureUsage = it.first->second;
textureUsage.Update(range,
[usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
@@ -51,48 +49,78 @@ namespace dawn_native {
});
}
- void PassResourceUsageTracker::AddTextureUsage(TextureBase* texture,
- const PassTextureUsage& textureUsage) {
- // Get or create a new PassTextureUsage for that texture (initially filled with
+ void SyncScopeUsageTracker::AddTextureUsage(TextureBase* texture,
+ const TextureSubresourceUsage& textureUsage) {
+ // Get or create a new TextureSubresourceUsage for that texture (initially filled with
// wgpu::TextureUsage::None)
auto it = mTextureUsages.emplace(
std::piecewise_construct, std::forward_as_tuple(texture),
std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
texture->GetNumMipLevels(), wgpu::TextureUsage::None));
- PassTextureUsage* passTextureUsage = &it.first->second;
+ TextureSubresourceUsage* passTextureUsage = &it.first->second;
passTextureUsage->Merge(
textureUsage, [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
const wgpu::TextureUsage& addedUsage) { *storedUsage |= addedUsage; });
}
- void PassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
- uint32_t queryIndex) {
- // The query availability only need to be tracked again on render pass for checking query
- // overwrite on render pass and resetting query set on Vulkan backend.
- DAWN_ASSERT(mPassType == PassType::Render);
- DAWN_ASSERT(querySet != nullptr);
-
- // Gets the iterator for that querySet or create a new vector of bool set to false
- // if the querySet wasn't registered.
- auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
- it->second[queryIndex] = true;
- }
-
- const QueryAvailabilityMap& PassResourceUsageTracker::GetQueryAvailabilityMap() const {
- return mQueryAvailabilities;
+ void SyncScopeUsageTracker::AddBindGroup(BindGroupBase* group) {
+ for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
+ break;
+ case wgpu::BufferBindingType::Storage:
+ BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
+ break;
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ BufferUsedAs(buffer, kReadOnlyStorageBuffer);
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+ break;
+ }
+
+ case BindingInfoType::Texture: {
+ TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+ TextureViewUsedAs(view, wgpu::TextureUsage::Sampled);
+ break;
+ }
+
+ case BindingInfoType::StorageTexture: {
+ TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+ switch (bindingInfo.storageTexture.access) {
+ case wgpu::StorageTextureAccess::ReadOnly:
+ TextureViewUsedAs(view, kReadOnlyStorageTexture);
+ break;
+ case wgpu::StorageTextureAccess::WriteOnly:
+ TextureViewUsedAs(view, wgpu::TextureUsage::Storage);
+ break;
+ case wgpu::StorageTextureAccess::Undefined:
+ UNREACHABLE();
+ }
+ break;
+ }
+
+ case BindingInfoType::Sampler:
+ break;
+ }
+ }
}
- // Returns the per-pass usage for use by backends for APIs with explicit barriers.
- PassResourceUsage PassResourceUsageTracker::AcquireResourceUsage() {
- PassResourceUsage result;
- result.passType = mPassType;
+ SyncScopeResourceUsage SyncScopeUsageTracker::AcquireSyncScopeUsage() {
+ SyncScopeResourceUsage result;
result.buffers.reserve(mBufferUsages.size());
result.bufferUsages.reserve(mBufferUsages.size());
result.textures.reserve(mTextureUsages.size());
result.textureUsages.reserve(mTextureUsages.size());
- result.querySets.reserve(mQueryAvailabilities.size());
- result.queryAvailabilities.reserve(mQueryAvailabilities.size());
for (auto& it : mBufferUsages) {
result.buffers.push_back(it.first);
@@ -104,16 +132,78 @@ namespace dawn_native {
result.textureUsages.push_back(std::move(it.second));
}
+ mBufferUsages.clear();
+ mTextureUsages.clear();
+
+ return result;
+ }
+
+ void ComputePassResourceUsageTracker::AddDispatch(SyncScopeResourceUsage scope) {
+ mUsage.dispatchUsages.push_back(std::move(scope));
+ }
+
+ void ComputePassResourceUsageTracker::AddReferencedBuffer(BufferBase* buffer) {
+ mUsage.referencedBuffers.insert(buffer);
+ }
+
+ void ComputePassResourceUsageTracker::AddResourcesReferencedByBindGroup(BindGroupBase* group) {
+ for (BindingIndex index{0}; index < group->GetLayout()->GetBindingCount(); ++index) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(index);
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ mUsage.referencedBuffers.insert(group->GetBindingAsBufferBinding(index).buffer);
+ break;
+ }
+
+ case BindingInfoType::Texture: {
+ mUsage.referencedTextures.insert(
+ group->GetBindingAsTextureView(index)->GetTexture());
+ break;
+ }
+
+ case BindingInfoType::StorageTexture:
+ case BindingInfoType::Sampler:
+ break;
+ }
+ }
+ }
+
+ ComputePassResourceUsage ComputePassResourceUsageTracker::AcquireResourceUsage() {
+ return std::move(mUsage);
+ }
+
+ RenderPassResourceUsage RenderPassResourceUsageTracker::AcquireResourceUsage() {
+ RenderPassResourceUsage result;
+ *static_cast<SyncScopeResourceUsage*>(&result) = AcquireSyncScopeUsage();
+
+ result.querySets.reserve(mQueryAvailabilities.size());
+ result.queryAvailabilities.reserve(mQueryAvailabilities.size());
+
for (auto& it : mQueryAvailabilities) {
result.querySets.push_back(it.first);
result.queryAvailabilities.push_back(std::move(it.second));
}
- mBufferUsages.clear();
- mTextureUsages.clear();
mQueryAvailabilities.clear();
return result;
}
+ void RenderPassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
+ uint32_t queryIndex) {
+ // Query availability only needs to be tracked on render passes, both to check for query
+ // overwrites within a render pass and to reset query sets on the Vulkan backend.
+ DAWN_ASSERT(querySet != nullptr);
+
+ // Get the iterator for that querySet, or create a new vector of bools (set to false) if
+ // the querySet wasn't registered yet.
+ auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
+ it->second[queryIndex] = true;
+ }
+
+ const QueryAvailabilityMap& RenderPassResourceUsageTracker::GetQueryAvailabilityMap() const {
+ return mQueryAvailabilities;
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
index cd54f8c3464..56b585489eb 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
@@ -23,35 +23,58 @@
namespace dawn_native {
+ class BindGroupBase;
class BufferBase;
class QuerySetBase;
class TextureBase;
using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
- // Helper class to encapsulate the logic of tracking per-resource usage during the
- // validation of command buffer passes. It is used both to know if there are validation
- // errors, and to get a list of resources used per pass for backends that need the
- // information.
- class PassResourceUsageTracker {
+ // Helper class to build SyncScopeResourceUsages
+ class SyncScopeUsageTracker {
public:
- PassResourceUsageTracker(PassType passType);
void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
void TextureViewUsedAs(TextureViewBase* texture, wgpu::TextureUsage usage);
- void AddTextureUsage(TextureBase* texture, const PassTextureUsage& textureUsage);
- void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
- const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
+ void AddTextureUsage(TextureBase* texture, const TextureSubresourceUsage& textureUsage);
+
+ // Walks the bind group and tracks all of its resources.
+ void AddBindGroup(BindGroupBase* group);
// Returns the per-pass usage for use by backends for APIs with explicit barriers.
- PassResourceUsage AcquireResourceUsage();
+ SyncScopeResourceUsage AcquireSyncScopeUsage();
private:
- PassType mPassType;
std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
- std::map<TextureBase*, PassTextureUsage> mTextureUsages;
- // Dedicated to track the availability of the queries used on render pass. The same query
- // cannot be written twice in same render pass, so each render pass also need to have its
- // own query availability map for validation.
+ std::map<TextureBase*, TextureSubresourceUsage> mTextureUsages;
+ };
+
+ // Helper class to build ComputePassResourceUsages
+ class ComputePassResourceUsageTracker {
+ public:
+ void AddDispatch(SyncScopeResourceUsage scope);
+ void AddReferencedBuffer(BufferBase* buffer);
+ void AddResourcesReferencedByBindGroup(BindGroupBase* group);
+
+ ComputePassResourceUsage AcquireResourceUsage();
+
+ private:
+ ComputePassResourceUsage mUsage;
+ };
+
+ // Helper class to build RenderPassResourceUsages
+ class RenderPassResourceUsageTracker : public SyncScopeUsageTracker {
+ public:
+ void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+ const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
+
+ RenderPassResourceUsage AcquireResourceUsage();
+
+ private:
+ // Hide AcquireSyncScopeUsage since users of this class should use AcquireResourceUsage
+ // instead.
+ using SyncScopeUsageTracker::AcquireSyncScopeUsage;
+
+ // Tracks queries used in the render pass to validate that they aren't written twice.
QueryAvailabilityMap mQueryAvailabilities;
};
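A minimal sketch of how these trackers can be combined for a compute pass (illustrative only, not part of this change; the helper is hypothetical and the real ComputePassEncoder may sequence these calls differently):

    #include "dawn_native/PassResourceUsageTracker.h"

    namespace dawn_native {
        void RecordDispatchUsage(ComputePassResourceUsageTracker* passUsage,
                                 SyncScopeUsageTracker* scope,
                                 BindGroupBase* group) {
            // Everything referenced by the bind group is validated on Queue::Submit, even if
            // this dispatch never touches it.
            passUsage->AddResourcesReferencedByBindGroup(group);
            // The currently bound groups form the synchronization scope of this dispatch.
            scope->AddBindGroup(group);
            passUsage->AddDispatch(scope->AcquireSyncScopeUsage());
        }
    }  // namespace dawn_native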
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
index e5933f555d2..c650afa1a4f 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
@@ -125,21 +125,24 @@ namespace dawn_native {
return {};
}
- BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
- if (GetDevice()->ConsumedError(ValidateGetBindGroupLayout(groupIndexIn))) {
- return BindGroupLayoutBase::MakeError(GetDevice());
- }
+ ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(
+ uint32_t groupIndexIn) {
+ DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));
BindGroupIndex groupIndex(groupIndexIn);
-
- BindGroupLayoutBase* bgl = nullptr;
if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
- bgl = GetDevice()->GetEmptyBindGroupLayout();
+ return Ref<BindGroupLayoutBase>(GetDevice()->GetEmptyBindGroupLayout());
} else {
- bgl = mLayout->GetBindGroupLayout(groupIndex);
+ return Ref<BindGroupLayoutBase>(mLayout->GetBindGroupLayout(groupIndex));
+ }
+ }
+
+ BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
+ Ref<BindGroupLayoutBase> result;
+ if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result)) {
+ return BindGroupLayoutBase::MakeError(GetDevice());
}
- bgl->Reference();
- return bgl;
+ return result.Detach();
}
size_t PipelineBase::ComputeContentHash() {
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.h b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
index 008845d62d8..3ed80f1148d 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
@@ -50,12 +50,15 @@ namespace dawn_native {
const ProgrammableStage& GetStage(SingleShaderStage stage) const;
const PerStage<ProgrammableStage>& GetAllStages() const;
- BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
+ ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);
// Helper functions for std::unordered_map-based pipeline caches.
size_t ComputeContentHash() override;
static bool EqualForCache(const PipelineBase* a, const PipelineBase* b);
+ // Implementation of the API entrypoint. Do not use in a reentrant manner.
+ BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
+
protected:
PipelineBase(DeviceBase* device,
PipelineLayoutBase* layout,
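A minimal sketch of the reentrant GetBindGroupLayout path, mirroring the QueryHelper change further below (illustrative only, not part of this change; the helper name is hypothetical):

    #include "dawn_native/Pipeline.h"

    namespace dawn_native {
        // Internal code propagates failures through ResultOrError; APIGetBindGroupLayout keeps
        // converting them into an error BindGroupLayout for the public entry point.
        MaybeError GetLayoutForGroupZero(PipelineBase* pipeline,
                                         Ref<BindGroupLayoutBase>* outLayout) {
            DAWN_TRY_ASSIGN(*outLayout, pipeline->GetBindGroupLayout(0));
            return {};
        }
    }  // namespace dawn_native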
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
index c905de1b275..c4be109a5c7 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
@@ -27,76 +27,18 @@
namespace dawn_native {
- namespace {
- void TrackBindGroupResourceUsage(PassResourceUsageTracker* usageTracker,
- BindGroupBase* group) {
- for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
- ++bindingIndex) {
- const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- usageTracker->BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
- break;
- case wgpu::BufferBindingType::Storage:
- usageTracker->BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
- break;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- usageTracker->BufferUsedAs(buffer, kReadOnlyStorageBuffer);
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- break;
- }
-
- case BindingInfoType::Texture: {
- TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
- usageTracker->TextureViewUsedAs(view, wgpu::TextureUsage::Sampled);
- break;
- }
-
- case BindingInfoType::StorageTexture: {
- TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::ReadOnly:
- usageTracker->TextureViewUsedAs(view, kReadOnlyStorageTexture);
- break;
- case wgpu::StorageTextureAccess::WriteOnly:
- usageTracker->TextureViewUsedAs(view, wgpu::TextureUsage::Storage);
- break;
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
- }
- break;
- }
-
- case BindingInfoType::Sampler:
- break;
- }
- }
- }
- } // namespace
-
ProgrammablePassEncoder::ProgrammablePassEncoder(DeviceBase* device,
- EncodingContext* encodingContext,
- PassType passType)
+ EncodingContext* encodingContext)
: ObjectBase(device),
mEncodingContext(encodingContext),
- mUsageTracker(passType),
mValidationEnabled(device->IsValidationEnabled()) {
}
ProgrammablePassEncoder::ProgrammablePassEncoder(DeviceBase* device,
EncodingContext* encodingContext,
- ErrorTag errorTag,
- PassType passType)
+ ErrorTag errorTag)
: ObjectBase(device, errorTag),
mEncodingContext(encodingContext),
- mUsageTracker(passType),
mValidationEnabled(device->IsValidationEnabled()) {
}
@@ -153,79 +95,75 @@ namespace dawn_native {
});
}
- void ProgrammablePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
- BindGroupBase* group,
- uint32_t dynamicOffsetCountIn,
- const uint32_t* dynamicOffsetsIn) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- BindGroupIndex groupIndex(groupIndexIn);
+ MaybeError ProgrammablePassEncoder::ValidateSetBindGroup(
+ BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCountIn,
+ const uint32_t* dynamicOffsetsIn) const {
+ DAWN_TRY(GetDevice()->ValidateObject(group));
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(group));
+ if (index >= kMaxBindGroupsTyped) {
+ return DAWN_VALIDATION_ERROR("Setting bind group over the max");
+ }
- if (groupIndex >= kMaxBindGroupsTyped) {
- return DAWN_VALIDATION_ERROR("Setting bind group over the max");
- }
+ ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
+ BindingIndex(dynamicOffsetCountIn));
- ityp::span<BindingIndex, const uint32_t> dynamicOffsets(
- dynamicOffsetsIn, BindingIndex(dynamicOffsetCountIn));
+ // Dynamic offsets count must match the number required by the layout perfectly.
+ const BindGroupLayoutBase* layout = group->GetLayout();
+ if (layout->GetDynamicBufferCount() != dynamicOffsets.size()) {
+ return DAWN_VALIDATION_ERROR("dynamicOffset count mismatch");
+ }
- // Dynamic offsets count must match the number required by the layout perfectly.
- const BindGroupLayoutBase* layout = group->GetLayout();
- if (layout->GetDynamicBufferCount() != dynamicOffsets.size()) {
- return DAWN_VALIDATION_ERROR("dynamicOffset count mismatch");
- }
+ for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
+ const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
- for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
- const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
-
- // BGL creation sorts bindings such that the dynamic buffer bindings are first.
- // ASSERT that this true.
- ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
- ASSERT(bindingInfo.buffer.hasDynamicOffset);
-
- if (dynamicOffsets[i] % kMinDynamicBufferOffsetAlignment != 0) {
- return DAWN_VALIDATION_ERROR("Dynamic Buffer Offset need to be aligned");
- }
-
- BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
-
- // During BindGroup creation, validation ensures binding offset + binding size
- // <= buffer size.
- ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
- ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >=
- bufferBinding.offset);
-
- if ((dynamicOffsets[i] > bufferBinding.buffer->GetSize() -
- bufferBinding.offset - bufferBinding.size)) {
- if ((bufferBinding.buffer->GetSize() - bufferBinding.offset) ==
- bufferBinding.size) {
- return DAWN_VALIDATION_ERROR(
- "Dynamic offset out of bounds. The binding goes to the end of the "
- "buffer even with a dynamic offset of 0. Did you forget to specify "
- "the binding's size?");
- } else {
- return DAWN_VALIDATION_ERROR("Dynamic offset out of bounds");
- }
- }
- }
- }
+ // BGL creation sorts bindings such that the dynamic buffer bindings are first.
+ // ASSERT that this is true.
+ ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+ ASSERT(bindingInfo.buffer.hasDynamicOffset);
- mCommandBufferState.SetBindGroup(groupIndex, group);
+ if (dynamicOffsets[i] % kMinDynamicBufferOffsetAlignment != 0) {
+ return DAWN_VALIDATION_ERROR("Dynamic Buffer Offset need to be aligned");
+ }
- SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
- cmd->index = groupIndex;
- cmd->group = group;
- cmd->dynamicOffsetCount = dynamicOffsetCountIn;
- if (dynamicOffsetCountIn > 0) {
- uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
- memcpy(offsets, dynamicOffsetsIn, dynamicOffsetCountIn * sizeof(uint32_t));
+ BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
+
+ // During BindGroup creation, validation ensures binding offset + binding size
+ // <= buffer size.
+ ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
+ ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >= bufferBinding.offset);
+
+ if ((dynamicOffsets[i] >
+ bufferBinding.buffer->GetSize() - bufferBinding.offset - bufferBinding.size)) {
+ if ((bufferBinding.buffer->GetSize() - bufferBinding.offset) ==
+ bufferBinding.size) {
+ return DAWN_VALIDATION_ERROR(
+ "Dynamic offset out of bounds. The binding goes to the end of the "
+ "buffer even with a dynamic offset of 0. Did you forget to specify "
+ "the binding's size?");
+ } else {
+ return DAWN_VALIDATION_ERROR("Dynamic offset out of bounds");
+ }
}
+ }
- TrackBindGroupResourceUsage(&mUsageTracker, group);
+ return {};
+ }
- return {};
- });
+ void ProgrammablePassEncoder::RecordSetBindGroup(CommandAllocator* allocator,
+ BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) const {
+ SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
+ cmd->index = index;
+ cmd->group = group;
+ cmd->dynamicOffsetCount = dynamicOffsetCount;
+ if (dynamicOffsetCount > 0) {
+ uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
+ memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
+ }
}
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
index 8816914cd72..4cea69628b3 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
@@ -15,11 +15,10 @@
#ifndef DAWNNATIVE_PROGRAMMABLEPASSENCODER_H_
#define DAWNNATIVE_PROGRAMMABLEPASSENCODER_H_
-#include "dawn_native/CommandBufferStateTracker.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Error.h"
+#include "dawn_native/IntegerTypes.h"
#include "dawn_native/ObjectBase.h"
-#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/dawn_platform.h"
@@ -30,34 +29,36 @@ namespace dawn_native {
// Base class for shared functionality between ComputePassEncoder and RenderPassEncoder.
class ProgrammablePassEncoder : public ObjectBase {
public:
- ProgrammablePassEncoder(DeviceBase* device,
- EncodingContext* encodingContext,
- PassType passType);
+ ProgrammablePassEncoder(DeviceBase* device, EncodingContext* encodingContext);
void APIInsertDebugMarker(const char* groupLabel);
void APIPopDebugGroup();
void APIPushDebugGroup(const char* groupLabel);
- void APISetBindGroup(uint32_t groupIndex,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount = 0,
- const uint32_t* dynamicOffsets = nullptr);
-
protected:
bool IsValidationEnabled() const;
MaybeError ValidateProgrammableEncoderEnd() const;
+ // Compute and render passes do different things on SetBindGroup. These are helper functions
+ // for the logic they have in common.
+ MaybeError ValidateSetBindGroup(BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCountIn,
+ const uint32_t* dynamicOffsetsIn) const;
+ void RecordSetBindGroup(CommandAllocator* allocator,
+ BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) const;
+
// Construct an "error" programmable pass encoder.
ProgrammablePassEncoder(DeviceBase* device,
EncodingContext* encodingContext,
- ErrorTag errorTag,
- PassType passType);
+ ErrorTag errorTag);
EncodingContext* mEncodingContext = nullptr;
- PassResourceUsageTracker mUsageTracker;
uint64_t mDebugGroupStackSize = 0;
- CommandBufferStateTracker mCommandBufferState;
private:
const bool mValidationEnabled;
diff --git a/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp b/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp
index 585e844c740..b9f58c5af96 100644
--- a/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp
@@ -28,9 +28,10 @@ namespace dawn_native {
namespace {
// Assert the offsets in dawn_native::TimestampParams match the ones in the shader
- static_assert(offsetof(dawn_native::TimestampParams, count) == 0, "");
- static_assert(offsetof(dawn_native::TimestampParams, offset) == 4, "");
- static_assert(offsetof(dawn_native::TimestampParams, period) == 8, "");
+ static_assert(offsetof(dawn_native::TimestampParams, first) == 0, "");
+ static_assert(offsetof(dawn_native::TimestampParams, count) == 4, "");
+ static_assert(offsetof(dawn_native::TimestampParams, offset) == 8, "");
+ static_assert(offsetof(dawn_native::TimestampParams, period) == 12, "");
static const char sConvertTimestampsToNanoseconds[] = R"(
struct Timestamp {
@@ -47,6 +48,7 @@ namespace dawn_native {
};
[[block]] struct TimestampParams {
+ first : u32;
count : u32;
offset : u32;
period : f32;
@@ -58,12 +60,11 @@ namespace dawn_native {
var<storage> availability : [[access(read)]] AvailabilityArr;
[[group(0), binding(2)]] var<uniform> params : TimestampParams;
- [[builtin(global_invocation_id)]] var<in> GlobalInvocationID : vec3<u32>;
const sizeofTimestamp : u32 = 8u;
[[stage(compute), workgroup_size(8, 1, 1)]]
- fn main() -> void {
+ fn main([[builtin(global_invocation_id)]] GlobalInvocationID : vec3<u32>) {
if (GlobalInvocationID.x >= params.count) { return; }
var index : u32 = GlobalInvocationID.x + params.offset / sizeofTimestamp;
@@ -71,7 +72,7 @@ namespace dawn_native {
var timestamp : Timestamp = timestamps.t[index];
// Return 0 for the unavailable value.
- if (availability.v[index] == 0u) {
+ if (availability.v[GlobalInvocationID.x + params.first] == 0u) {
timestamps.t[index].low = 0u;
timestamps.t[index].high = 0u;
return;
@@ -105,7 +106,8 @@ namespace dawn_native {
}
)";
- ComputePipelineBase* GetOrCreateTimestampComputePipeline(DeviceBase* device) {
+ ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(
+ DeviceBase* device) {
InternalPipelineStore* store = device->GetInternalPipelineStore();
if (store->timestampComputePipeline == nullptr) {
@@ -116,8 +118,7 @@ namespace dawn_native {
wgslDesc.source = sConvertTimestampsToNanoseconds;
descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- store->timestampCS = AcquireRef(device->APICreateShaderModule(&descriptor));
+ DAWN_TRY_ASSIGN(store->timestampCS, device->CreateShaderModule(&descriptor));
}
// Create ComputePipeline.
@@ -127,9 +128,8 @@ namespace dawn_native {
computePipelineDesc.computeStage.module = store->timestampCS.Get();
computePipelineDesc.computeStage.entryPoint = "main";
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- store->timestampComputePipeline =
- AcquireRef(device->APICreateComputePipeline(&computePipelineDesc));
+ DAWN_TRY_ASSIGN(store->timestampComputePipeline,
+ device->CreateComputePipeline(&computePipelineDesc));
}
return store->timestampComputePipeline.Get();
@@ -137,17 +137,18 @@ namespace dawn_native {
} // anonymous namespace
- void EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
- BufferBase* timestamps,
- BufferBase* availability,
- BufferBase* params) {
+ MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+ BufferBase* timestamps,
+ BufferBase* availability,
+ BufferBase* params) {
DeviceBase* device = encoder->GetDevice();
- ComputePipelineBase* pipeline = GetOrCreateTimestampComputePipeline(device);
+ ComputePipelineBase* pipeline;
+ DAWN_TRY_ASSIGN(pipeline, GetOrCreateTimestampComputePipeline(device));
// Prepare bind group layout.
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<BindGroupLayoutBase> layout = AcquireRef(pipeline->APIGetBindGroupLayout(0));
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
// Prepare bind group descriptor
std::array<BindGroupEntry, 3> bindGroupEntries = {};
@@ -168,8 +169,8 @@ namespace dawn_native {
bindGroupEntries[2].size = params->GetSize();
// Create bind group after all binding entries are set.
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<BindGroupBase> bindGroup = AcquireRef(device->APICreateBindGroup(&bgDesc));
+ Ref<BindGroupBase> bindGroup;
+ DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bgDesc));
// Create compute encoder and issue dispatch.
ComputePassDescriptor passDesc = {};
@@ -180,6 +181,8 @@ namespace dawn_native {
pass->APIDispatch(
static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
pass->APIEndPass();
+
+ return {};
}
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/QueryHelper.h b/chromium/third_party/dawn/src/dawn_native/QueryHelper.h
index 6e31bebc382..90f3398294b 100644
--- a/chromium/third_party/dawn/src/dawn_native/QueryHelper.h
+++ b/chromium/third_party/dawn/src/dawn_native/QueryHelper.h
@@ -15,6 +15,7 @@
#ifndef DAWNNATIVE_QUERYHELPER_H_
#define DAWNNATIVE_QUERYHELPER_H_
+#include "dawn_native/Error.h"
#include "dawn_native/ObjectBase.h"
namespace dawn_native {
@@ -23,15 +24,16 @@ namespace dawn_native {
class CommandEncoder;
struct TimestampParams {
+ uint32_t first;
uint32_t count;
uint32_t offset;
float period;
};
- void EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
- BufferBase* timestamps,
- BufferBase* availability,
- BufferBase* params);
+ MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+ BufferBase* timestamps,
+ BufferBase* availability,
+ BufferBase* params);
} // namespace dawn_native
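Since TimestampParams gained a first member, the static_asserts above pin its layout against the WGSL uniform. A small sketch of filling it (illustrative only, not part of this change; the field meanings are inferred from the shader: first indexes the availability buffer, offset is a byte offset into the timestamp buffer, count bounds the dispatch, and period converts ticks to nanoseconds):

    #include "dawn_native/QueryHelper.h"

    namespace dawn_native {
        TimestampParams MakeTimestampParams(uint32_t firstQuery,
                                            uint32_t queryCount,
                                            uint32_t destinationOffset,
                                            float periodInNs) {
            TimestampParams params;
            params.first = firstQuery;
            params.count = queryCount;
            params.offset = destinationOffset;
            params.period = periodInNs;
            return params;
        }

        // Three u32 fields plus one f32 with no padding: 16 bytes, matching the shader-side
        // [[block]] struct TimestampParams.
        static_assert(sizeof(TimestampParams) == 16, "must match the WGSL uniform layout");
    }  // namespace dawn_native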
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.cpp b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
index c113c1efe10..eddee0ad7a9 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
@@ -315,21 +315,17 @@ namespace dawn_native {
size_t dataSize,
const TextureDataLayout* dataLayout,
const Extent3D* writeSize) {
- Extent3D fixedWriteSize = *writeSize;
- DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedWriteSize));
+ DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));
- DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, &fixedWriteSize));
-
- if (fixedWriteSize.width == 0 || fixedWriteSize.height == 0 ||
- fixedWriteSize.depthOrArrayLayers == 0) {
+ if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depthOrArrayLayers == 0) {
return {};
}
const TexelBlockInfo& blockInfo =
destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
TextureDataLayout layout = *dataLayout;
- ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, fixedWriteSize);
- return WriteTextureImpl(*destination, data, layout, fixedWriteSize);
+ ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
+ return WriteTextureImpl(*destination, data, layout, *writeSize);
}
MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
@@ -389,14 +385,12 @@ namespace dawn_native {
const ImageCopyTexture* destination,
const Extent3D* copySize,
const CopyTextureForBrowserOptions* options) {
- Extent3D fixedCopySize = *copySize;
- DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedCopySize));
if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateCopyTextureForBrowser(GetDevice(), source, destination, &fixedCopySize,
- options));
+ DAWN_TRY(
+ ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options));
}
- return DoCopyTextureForBrowser(GetDevice(), source, destination, &fixedCopySize, options);
+ return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
}
MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
@@ -410,11 +404,21 @@ namespace dawn_native {
const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();
- for (const PassResourceUsage& passUsages : usages.perPass) {
- for (const BufferBase* buffer : passUsages.buffers) {
+ for (const SyncScopeResourceUsage& scope : usages.renderPasses) {
+ for (const BufferBase* buffer : scope.buffers) {
+ DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+ }
+
+ for (const TextureBase* texture : scope.textures) {
+ DAWN_TRY(texture->ValidateCanUseInSubmitNow());
+ }
+ }
+
+ for (const ComputePassResourceUsage& pass : usages.computePasses) {
+ for (const BufferBase* buffer : pass.referencedBuffers) {
DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
}
- for (const TextureBase* texture : passUsages.textures) {
+ for (const TextureBase* texture : pass.referencedTextures) {
DAWN_TRY(texture->ValidateCanUseInSubmitNow());
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp b/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
index 930eb6e53f0..f4e0a8c0e09 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
@@ -24,7 +24,7 @@ namespace dawn_native {
RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
const RenderBundleDescriptor* descriptor,
Ref<AttachmentState> attachmentState,
- PassResourceUsage resourceUsage)
+ RenderPassResourceUsage resourceUsage)
: ObjectBase(encoder->GetDevice()),
mCommands(encoder->AcquireCommands()),
mAttachmentState(std::move(attachmentState)),
@@ -53,7 +53,7 @@ namespace dawn_native {
return mAttachmentState.Get();
}
- const PassResourceUsage& RenderBundleBase::GetResourceUsage() const {
+ const RenderPassResourceUsage& RenderBundleBase::GetResourceUsage() const {
ASSERT(!IsError());
return mResourceUsage;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundle.h b/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
index 41d686a86b4..f971ed6a369 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
@@ -36,14 +36,14 @@ namespace dawn_native {
RenderBundleBase(RenderBundleEncoder* encoder,
const RenderBundleDescriptor* descriptor,
Ref<AttachmentState> attachmentState,
- PassResourceUsage resourceUsage);
+ RenderPassResourceUsage resourceUsage);
static RenderBundleBase* MakeError(DeviceBase* device);
CommandIterator* GetCommands();
const AttachmentState* GetAttachmentState() const;
- const PassResourceUsage& GetResourceUsage() const;
+ const RenderPassResourceUsage& GetResourceUsage() const;
protected:
~RenderBundleBase() override;
@@ -53,7 +53,7 @@ namespace dawn_native {
CommandIterator mCommands;
Ref<AttachmentState> mAttachmentState;
- PassResourceUsage mResourceUsage;
+ RenderPassResourceUsage mResourceUsage;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
index 45647a42cad..daff3eb33bc 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
@@ -123,21 +123,20 @@ namespace dawn_native {
// errors.
DAWN_TRY(mBundleEncodingContext.Finish());
- PassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
+ RenderPassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(ValidateProgrammableEncoderEnd());
- DAWN_TRY(ValidateFinish(mBundleEncodingContext.GetIterator(), usages));
+ DAWN_TRY(ValidateFinish(usages));
}
return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), std::move(usages));
}
- MaybeError RenderBundleEncoder::ValidateFinish(CommandIterator* commands,
- const PassResourceUsage& usages) const {
+ MaybeError RenderBundleEncoder::ValidateFinish(const RenderPassResourceUsage& usages) const {
TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "RenderBundleEncoder::ValidateFinish");
DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(ValidatePassResourceUsage(usages));
+ DAWN_TRY(ValidateSyncScopeResourceUsage(usages));
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
index 27d5de26d21..13439b7ef73 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
@@ -41,7 +41,7 @@ namespace dawn_native {
RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
ResultOrError<RenderBundleBase*> FinishImpl(const RenderBundleDescriptor* descriptor);
- MaybeError ValidateFinish(CommandIterator* commands, const PassResourceUsage& usages) const;
+ MaybeError ValidateFinish(const RenderPassResourceUsage& usages) const;
EncodingContext mBundleEncodingContext;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
index f87bb3b54ee..f1e7c423be8 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
@@ -32,7 +32,7 @@ namespace dawn_native {
RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
EncodingContext* encodingContext,
Ref<AttachmentState> attachmentState)
- : ProgrammablePassEncoder(device, encodingContext, PassType::Render),
+ : ProgrammablePassEncoder(device, encodingContext),
mAttachmentState(std::move(attachmentState)),
mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
@@ -41,7 +41,7 @@ namespace dawn_native {
RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
EncodingContext* encodingContext,
ErrorTag errorTag)
- : ProgrammablePassEncoder(device, encodingContext, errorTag, PassType::Render),
+ : ProgrammablePassEncoder(device, encodingContext, errorTag),
mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
}
@@ -203,16 +203,6 @@ namespace dawn_native {
});
}
- void RenderEncoderBase::APISetIndexBufferWithFormat(BufferBase* buffer,
- wgpu::IndexFormat format,
- uint64_t offset,
- uint64_t size) {
- GetDevice()->EmitDeprecationWarning(
- "RenderEncoderBase::SetIndexBufferWithFormat is deprecated. Use "
- "RenderEncoderBase::SetIndexBuffer instead.");
- APISetIndexBuffer(buffer, format, offset, size);
- }
-
void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
wgpu::IndexFormat format,
uint64_t offset,
@@ -308,4 +298,24 @@ namespace dawn_native {
});
}
+ void RenderEncoderBase::APISetBindGroup(uint32_t groupIndexIn,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ BindGroupIndex groupIndex(groupIndexIn);
+
+ if (IsValidationEnabled()) {
+ DAWN_TRY(
+ ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
+ }
+
+ RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+ mCommandBufferState.SetBindGroup(groupIndex, group);
+ mUsageTracker.AddBindGroup(group);
+
+ return {};
+ });
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
index 4f312707eed..4976ee20cd0 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
@@ -16,7 +16,9 @@
#define DAWNNATIVE_RENDERENCODERBASE_H_
#include "dawn_native/AttachmentState.h"
+#include "dawn_native/CommandBufferStateTracker.h"
#include "dawn_native/Error.h"
+#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/ProgrammablePassEncoder.h"
namespace dawn_native {
@@ -47,10 +49,11 @@ namespace dawn_native {
wgpu::IndexFormat format,
uint64_t offset,
uint64_t size);
- void APISetIndexBufferWithFormat(BufferBase* buffer,
- wgpu::IndexFormat format,
- uint64_t offset,
- uint64_t size);
+
+ void APISetBindGroup(uint32_t groupIndex,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount = 0,
+ const uint32_t* dynamicOffsets = nullptr);
const AttachmentState* GetAttachmentState() const;
Ref<AttachmentState> AcquireAttachmentState();
@@ -59,6 +62,9 @@ namespace dawn_native {
// Construct an "error" render encoder base.
RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
+ CommandBufferStateTracker mCommandBufferState;
+ RenderPassResourceUsageTracker mUsageTracker;
+
private:
Ref<AttachmentState> mAttachmentState;
const bool mDisableBaseVertex;
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
index 4c48bb43e80..a7b6abc68a3 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
@@ -51,7 +51,7 @@ namespace dawn_native {
RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
CommandEncoder* commandEncoder,
EncodingContext* encodingContext,
- PassResourceUsageTracker usageTracker,
+ RenderPassResourceUsageTracker usageTracker,
Ref<AttachmentState> attachmentState,
QuerySetBase* occlusionQuerySet,
uint32_t renderTargetWidth,
@@ -115,15 +115,22 @@ namespace dawn_native {
});
}
- void RenderPassEncoder::APISetBlendColor(const Color* color) {
+ void RenderPassEncoder::APISetBlendConstant(const Color* color) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- SetBlendColorCmd* cmd = allocator->Allocate<SetBlendColorCmd>(Command::SetBlendColor);
+ SetBlendConstantCmd* cmd =
+ allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
cmd->color = *color;
return {};
});
}
+ void RenderPassEncoder::APISetBlendColor(const Color* color) {
+ GetDevice()->EmitDeprecationWarning(
+ "SetBlendColor has been deprecated in favor of SetBlendConstant.");
+ APISetBlendConstant(color);
+ }
+
void RenderPassEncoder::APISetViewport(float x,
float y,
float width,
@@ -214,7 +221,7 @@ namespace dawn_native {
for (uint32_t i = 0; i < count; ++i) {
bundles[i] = renderBundles[i];
- const PassResourceUsage& usages = bundles[i]->GetResourceUsage();
+ const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
index a8bf460548c..19fcc803654 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
@@ -27,7 +27,7 @@ namespace dawn_native {
RenderPassEncoder(DeviceBase* device,
CommandEncoder* commandEncoder,
EncodingContext* encodingContext,
- PassResourceUsageTracker usageTracker,
+ RenderPassResourceUsageTracker usageTracker,
Ref<AttachmentState> attachmentState,
QuerySetBase* occlusionQuerySet,
uint32_t renderTargetWidth,
@@ -40,7 +40,8 @@ namespace dawn_native {
void APIEndPass();
void APISetStencilReference(uint32_t reference);
- void APISetBlendColor(const Color* color);
+ void APISetBlendConstant(const Color* color);
+ void APISetBlendColor(const Color* color); // Deprecated
void APISetViewport(float x,
float y,
float width,
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
index eb306238ddb..c32034dd01a 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
@@ -16,6 +16,7 @@
#include "common/BitSetIterator.h"
#include "common/VertexFormatUtils.h"
+#include "dawn_native/ChainUtils_autogen.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/ObjectContentHasher.h"
@@ -133,16 +134,13 @@ namespace dawn_native {
MaybeError ValidatePrimitiveState(const DeviceBase* device,
const PrimitiveState* descriptor) {
- const ChainedStruct* chained = descriptor->nextInChain;
- if (chained != nullptr) {
- if (chained->sType != wgpu::SType::PrimitiveDepthClampingState) {
- return DAWN_VALIDATION_ERROR("Unsupported sType");
- }
- if (!device->IsExtensionEnabled(Extension::DepthClamping)) {
- return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
- }
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+ wgpu::SType::PrimitiveDepthClampingState));
+ const PrimitiveDepthClampingState* clampInfo = nullptr;
+ FindInChain(descriptor->nextInChain, &clampInfo);
+ if (clampInfo && !device->IsExtensionEnabled(Extension::DepthClamping)) {
+ return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
}
-
DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat));
DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
@@ -211,7 +209,26 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateBlendState(const BlendState* descriptor) {
+ static constexpr wgpu::BlendFactor kFirstDeprecatedBlendFactor =
+ wgpu::BlendFactor::SrcColor;
+ static constexpr uint32_t kDeprecatedBlendFactorOffset = 100;
+
+ bool IsDeprecatedBlendFactor(wgpu::BlendFactor blendFactor) {
+ return blendFactor >= kFirstDeprecatedBlendFactor;
+ }
+
+ wgpu::BlendFactor NormalizeBlendFactor(wgpu::BlendFactor blendFactor) {
+            // If the specified blend factor is from the deprecated range, return the
+            // corresponding non-deprecated value.
+ if (blendFactor >= kFirstDeprecatedBlendFactor) {
+ uint32_t blendFactorValue = static_cast<uint32_t>(blendFactor);
+ return static_cast<wgpu::BlendFactor>(blendFactorValue -
+ kDeprecatedBlendFactorOffset);
+ }
+ return blendFactor;
+ }
+
+ MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) {
DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor));
@@ -219,10 +236,18 @@ namespace dawn_native {
DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
+ if (IsDeprecatedBlendFactor(descriptor->alpha.srcFactor) ||
+ IsDeprecatedBlendFactor(descriptor->alpha.dstFactor) ||
+ IsDeprecatedBlendFactor(descriptor->color.srcFactor) ||
+ IsDeprecatedBlendFactor(descriptor->color.dstFactor)) {
+ device->EmitDeprecationWarning(
+ "Blend factor enums have changed and the old enums will be removed soon.");
+ }
+
return {};
}
- MaybeError ValidateColorTargetState(const DeviceBase* device,
+ MaybeError ValidateColorTargetState(DeviceBase* device,
const ColorTargetState* descriptor,
bool fragmentWritten,
wgpu::TextureComponentType fragmentOutputBaseType) {
@@ -231,7 +256,7 @@ namespace dawn_native {
}
if (descriptor->blend) {
- DAWN_TRY(ValidateBlendState(descriptor->blend));
+ DAWN_TRY(ValidateBlendState(device, descriptor->blend));
}
DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
@@ -399,11 +424,10 @@ namespace dawn_native {
}
mPrimitive = descriptor->primitive;
- const ChainedStruct* chained = mPrimitive.nextInChain;
- if (chained != nullptr) {
- ASSERT(chained->sType == wgpu::SType::PrimitiveDepthClampingState);
- const auto* clampState = static_cast<const PrimitiveDepthClampingState*>(chained);
- mClampDepth = clampState->clampDepth;
+ const PrimitiveDepthClampingState* clampInfo = nullptr;
+ FindInChain(mPrimitive.nextInChain, &clampInfo);
+ if (clampInfo) {
+ mClampDepth = clampInfo->clampDepth;
}
mMultisample = descriptor->multisample;
@@ -440,6 +464,14 @@ namespace dawn_native {
if (target->blend != nullptr) {
mTargetBlend[i] = *target->blend;
mTargets[i].blend = &mTargetBlend[i];
+ mTargetBlend[i].alpha.srcFactor =
+ NormalizeBlendFactor(mTargetBlend[i].alpha.srcFactor);
+ mTargetBlend[i].alpha.dstFactor =
+ NormalizeBlendFactor(mTargetBlend[i].alpha.dstFactor);
+ mTargetBlend[i].color.srcFactor =
+ NormalizeBlendFactor(mTargetBlend[i].color.srcFactor);
+ mTargetBlend[i].color.dstFactor =
+ NormalizeBlendFactor(mTargetBlend[i].color.dstFactor);
}
}
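
The deprecated blend-factor handling above works because the deprecated enum values are assumed to sit exactly kDeprecatedBlendFactorOffset (100) above their replacements, so normalization is one comparison and one subtraction after ValidateBlendState has emitted the deprecation warning. A minimal standalone sketch of that scheme follows; the enum and its values are illustrative stand-ins, not the real wgpu::BlendFactor definitions.

#include <cassert>
#include <cstdint>

// Illustrative enum: current values are small, deprecated aliases sit +100 above them.
enum class BlendFactor : uint32_t {
    Zero = 0,
    One = 1,
    Src = 2,
    OneMinusSrc = 3,
    // Deprecated aliases (offset by 100 from their replacements).
    SrcColor = 102,
    OneMinusSrcColor = 103,
};

constexpr uint32_t kDeprecatedOffset = 100;
constexpr BlendFactor kFirstDeprecated = BlendFactor::SrcColor;

constexpr bool IsDeprecated(BlendFactor f) {
    return f >= kFirstDeprecated;
}

constexpr BlendFactor Normalize(BlendFactor f) {
    // Map a deprecated alias back onto its replacement; pass everything else through.
    return IsDeprecated(f) ? static_cast<BlendFactor>(static_cast<uint32_t>(f) - kDeprecatedOffset)
                           : f;
}

int main() {
    static_assert(Normalize(BlendFactor::SrcColor) == BlendFactor::Src, "deprecated maps to new");
    static_assert(Normalize(BlendFactor::One) == BlendFactor::One, "non-deprecated is unchanged");
    assert(IsDeprecated(BlendFactor::OneMinusSrcColor));
    return 0;
}
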
diff --git a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp b/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
index a2dcaabd3b7..51db0805f54 100644
--- a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
@@ -58,7 +58,14 @@ namespace dawn_native {
DAWN_TRY(ValidateAddressMode(descriptor->addressModeU));
DAWN_TRY(ValidateAddressMode(descriptor->addressModeV));
DAWN_TRY(ValidateAddressMode(descriptor->addressModeW));
- DAWN_TRY(ValidateCompareFunction(descriptor->compare));
+
+        // CompareFunction::Undefined is normally rejected as invalid because it can't be used,
+        // except in the SamplerDescriptor, where it is a special value meaning the sampler is
+        // not a comparison sampler.
+ if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+ DAWN_TRY(ValidateCompareFunction(descriptor->compare));
+ }
+
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
index af6b2d7b1e1..285afcf72a8 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
@@ -14,8 +14,10 @@
#include "dawn_native/ShaderModule.h"
+#include "common/HashUtils.h"
#include "common/VertexFormatUtils.h"
#include "dawn_native/BindGroupLayout.h"
+#include "dawn_native/ChainUtils_autogen.h"
#include "dawn_native/CompilationMessages.h"
#include "dawn_native/Device.h"
#include "dawn_native/ObjectContentHasher.h"
@@ -191,6 +193,10 @@ namespace dawn_native {
case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageTexture:
case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
return BindingInfoType::StorageTexture;
+
+ default:
+ UNREACHABLE();
+ return BindingInfoType::Buffer;
}
}
@@ -428,25 +434,6 @@ namespace dawn_native {
return std::move(program);
}
- MaybeError ValidateModule(const tint::Program* program,
- OwnedCompilationMessages* outMessages) {
- std::ostringstream errorStream;
- errorStream << "Tint program validation" << std::endl;
-
- tint::Validator validator;
- bool isValid = validator.Validate(program);
- if (outMessages != nullptr) {
- outMessages->AddMessages(validator.diagnostics());
- }
- if (!isValid) {
- auto err = validator.diagnostics().str();
- errorStream << "Validation: " << err << std::endl;
- return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
- }
-
- return {};
- }
-
ResultOrError<std::vector<uint32_t>> ModuleToSPIRV(const tint::Program* program) {
std::ostringstream errorStream;
errorStream << "Tint SPIR-V writer failure:" << std::endl;
@@ -1066,6 +1053,17 @@ namespace dawn_native {
return tintProgram != nullptr || spirv.size() > 0;
}
+ // TintSource is a PIMPL container for a tint::Source::File, which needs to be kept alive for as
+    // long as tint diagnostics are inspected / printed.
+ class TintSource {
+ public:
+ template <typename... ARGS>
+ TintSource(ARGS&&... args) : file(std::forward<ARGS>(args)...) {
+ }
+
+ tint::Source::File file;
+ };
+
MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) {
@@ -1076,72 +1074,58 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Shader module descriptor missing chained descriptor");
}
// For now only a single SPIRV or WGSL subdescriptor is allowed.
- if (chainedDescriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR(
- "Shader module descriptor chained nextInChain must be nullptr");
- }
+ DAWN_TRY(ValidateSingleSType(chainedDescriptor, wgpu::SType::ShaderModuleSPIRVDescriptor,
+ wgpu::SType::ShaderModuleWGSLDescriptor));
OwnedCompilationMessages* outMessages = parseResult->compilationMessages.get();
ScopedTintICEHandler scopedICEHandler(device);
- switch (chainedDescriptor->sType) {
- case wgpu::SType::ShaderModuleSPIRVDescriptor: {
- const auto* spirvDesc =
- static_cast<const ShaderModuleSPIRVDescriptor*>(chainedDescriptor);
- std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
- if (device->IsToggleEnabled(Toggle::UseTintGenerator)) {
- tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
- if (device->IsValidationEnabled()) {
- DAWN_TRY(ValidateModule(&program, outMessages));
- }
- parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
- } else {
- if (device->IsValidationEnabled()) {
- DAWN_TRY(ValidateSpirv(spirv.data(), spirv.size()));
- }
- parseResult->spirv = std::move(spirv);
+ const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+ FindInChain(chainedDescriptor, &spirvDesc);
+ const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+ FindInChain(chainedDescriptor, &wgslDesc);
+
+ if (spirvDesc) {
+ std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+ if (device->IsToggleEnabled(Toggle::UseTintGenerator)) {
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
+ parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+ } else {
+ if (device->IsValidationEnabled()) {
+ DAWN_TRY(ValidateSpirv(spirv.data(), spirv.size()));
}
- break;
+ parseResult->spirv = std::move(spirv);
}
+ } else if (wgslDesc) {
+ auto tintSource = std::make_unique<TintSource>("", wgslDesc->source);
- case wgpu::SType::ShaderModuleWGSLDescriptor: {
- const auto* wgslDesc =
- static_cast<const ShaderModuleWGSLDescriptor*>(chainedDescriptor);
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, ParseWGSL(&tintSource->file, outMessages));
- tint::Source::File file("", wgslDesc->source);
+ if (device->IsToggleEnabled(Toggle::UseTintGenerator)) {
+ parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+ parseResult->tintSource = std::move(tintSource);
+ } else {
+ tint::transform::Manager transformManager;
+ transformManager.Add<tint::transform::Spirv>();
- tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseWGSL(&file, outMessages));
+ tint::transform::DataMap transformInputs;
- if (device->IsToggleEnabled(Toggle::UseTintGenerator)) {
- if (device->IsValidationEnabled()) {
- DAWN_TRY(ValidateModule(&program, outMessages));
- }
- parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
- } else {
- tint::transform::Manager transformManager;
- transformManager.append(
- std::make_unique<tint::transform::EmitVertexPointSize>());
- transformManager.append(std::make_unique<tint::transform::Spirv>());
- DAWN_TRY_ASSIGN(program,
- RunTransforms(&transformManager, &program, outMessages));
-
- if (device->IsValidationEnabled()) {
- DAWN_TRY(ValidateModule(&program, outMessages));
- }
+ tint::transform::Spirv::Config spirv_cfg;
+ spirv_cfg.emit_vertex_point_size = true;
+ transformInputs.Add<tint::transform::Spirv::Config>(spirv_cfg);
- std::vector<uint32_t> spirv;
- DAWN_TRY_ASSIGN(spirv, ModuleToSPIRV(&program));
- DAWN_TRY(ValidateSpirv(spirv.data(), spirv.size()));
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, &program, transformInputs,
+ nullptr, outMessages));
- parseResult->spirv = std::move(spirv);
- }
- break;
+ std::vector<uint32_t> spirv;
+ DAWN_TRY_ASSIGN(spirv, ModuleToSPIRV(&program));
+ DAWN_TRY(ValidateSpirv(spirv.data(), spirv.size()));
+
+ parseResult->spirv = std::move(spirv);
}
- default:
- return DAWN_VALIDATION_ERROR("Unsupported sType");
}
return {};
@@ -1160,8 +1144,10 @@ namespace dawn_native {
ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
const tint::Program* program,
+ const tint::transform::DataMap& inputs,
+ tint::transform::DataMap* outputs,
OwnedCompilationMessages* outMessages) {
- tint::transform::Transform::Output output = transform->Run(program);
+ tint::transform::Output output = transform->Run(program, inputs);
if (outMessages != nullptr) {
outMessages->AddMessages(output.program.Diagnostics());
}
@@ -1169,13 +1155,16 @@ namespace dawn_native {
std::string err = "Tint program failure: " + output.program.Diagnostics().str();
return DAWN_VALIDATION_ERROR(err.c_str());
}
+ if (outputs != nullptr) {
+ *outputs = std::move(output.data);
+ }
return std::move(output.program);
}
- std::unique_ptr<tint::transform::VertexPulling> MakeVertexPullingTransform(
- const VertexState& vertexState,
- const std::string& entryPoint,
- BindGroupIndex pullingBufferBindingSet) {
+ void AddVertexPullingTransformConfig(const VertexState& vertexState,
+ const std::string& entryPoint,
+ BindGroupIndex pullingBufferBindingSet,
+ tint::transform::DataMap* transformInputs) {
tint::transform::VertexPulling::Config cfg;
cfg.entry_point_name = entryPoint;
cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
@@ -1197,7 +1186,7 @@ namespace dawn_native {
cfg.vertex_state.push_back(std::move(layout));
}
- return std::make_unique<tint::transform::VertexPulling>(cfg);
+ transformInputs->Add<tint::transform::VertexPulling::Config>(cfg);
}
MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
@@ -1225,23 +1214,18 @@ namespace dawn_native {
ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
: CachedObject(device), mType(Type::Undefined) {
ASSERT(descriptor->nextInChain != nullptr);
- switch (descriptor->nextInChain->sType) {
- case wgpu::SType::ShaderModuleSPIRVDescriptor: {
- mType = Type::Spirv;
- const auto* spirvDesc =
- static_cast<const ShaderModuleSPIRVDescriptor*>(descriptor->nextInChain);
- mOriginalSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
- break;
- }
- case wgpu::SType::ShaderModuleWGSLDescriptor: {
- mType = Type::Wgsl;
- const auto* wgslDesc =
- static_cast<const ShaderModuleWGSLDescriptor*>(descriptor->nextInChain);
- mWgsl = std::string(wgslDesc->source);
- break;
- }
- default:
- UNREACHABLE();
+ const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &spirvDesc);
+ const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &wgslDesc);
+ ASSERT(spirvDesc || wgslDesc);
+
+ if (spirvDesc) {
+ mType = Type::Spirv;
+ mOriginalSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+ } else if (wgslDesc) {
+ mType = Type::Wgsl;
+ mWgsl = std::string(wgslDesc->source);
}
}
@@ -1330,19 +1314,27 @@ namespace dawn_native {
errorStream << "Tint vertex pulling failure:" << std::endl;
tint::transform::Manager transformManager;
- transformManager.append(
- MakeVertexPullingTransform(vertexState, entryPoint, pullingBufferBindingSet));
- transformManager.append(std::make_unique<tint::transform::EmitVertexPointSize>());
- transformManager.append(std::make_unique<tint::transform::Spirv>());
+ transformManager.Add<tint::transform::VertexPulling>();
+ transformManager.Add<tint::transform::Spirv>();
if (GetDevice()->IsRobustnessEnabled()) {
- transformManager.append(std::make_unique<tint::transform::BoundArrayAccessors>());
+ transformManager.Add<tint::transform::BoundArrayAccessors>();
}
+ tint::transform::DataMap transformInputs;
+
+ tint::transform::Spirv::Config spirv_cfg;
+ spirv_cfg.emit_vertex_point_size = true;
+ transformInputs.Add<tint::transform::Spirv::Config>(spirv_cfg);
+
+ AddVertexPullingTransformConfig(vertexState, entryPoint, pullingBufferBindingSet,
+ &transformInputs);
+
// A nullptr is passed in for the CompilationMessages here since this method is called
- // during RenderPipeline creation, by which point the shader module's CompilationInfo may
- // have already been queried.
+ // during RenderPipeline creation, by which point the shader module's CompilationInfo
+ // may have already been queried.
tint::Program program;
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, programIn, nullptr));
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, programIn, transformInputs,
+ nullptr, nullptr));
tint::writer::spirv::Generator generator(&program);
if (!generator.Generate()) {
@@ -1357,6 +1349,7 @@ namespace dawn_native {
MaybeError ShaderModuleBase::InitializeBase(ShaderModuleParseResult* parseResult) {
mTintProgram = std::move(parseResult->tintProgram);
+ mTintSource = std::move(parseResult->tintSource);
mSpirv = std::move(parseResult->spirv);
mCompilationMessages = std::move(parseResult->compilationMessages);
@@ -1393,4 +1386,11 @@ namespace dawn_native {
return std::move(result);
}
+ size_t PipelineLayoutEntryPointPairHashFunc::operator()(
+ const PipelineLayoutEntryPointPair& pair) const {
+ size_t hash = 0;
+ HashCombine(&hash, pair.first, pair.second);
+ return hash;
+ }
+
} // namespace dawn_native
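
The PipelineLayoutEntryPointPairHashFunc added above exists because std::unordered_map has no default hash for a std::pair key; HashCombine from common/HashUtils.h folds both members into a single hash. The sketch below shows the same shape with a boost-style combine written inline (Dawn's HashCombine differs in its mixing details) and a const void* standing in for the PipelineLayoutBase* member.

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>

// Boost-style hash_combine; serves the same purpose as Dawn's HashCombine helper.
template <typename T>
void HashCombine(size_t* hash, const T& value) {
    *hash ^= std::hash<T>()(value) + 0x9e3779b9 + (*hash << 6) + (*hash >> 2);
}

using LayoutEntryPointPair = std::pair<const void*, std::string>;  // stand-in for PipelineLayoutBase*

struct LayoutEntryPointPairHashFunc {
    size_t operator()(const LayoutEntryPointPair& pair) const {
        size_t hash = 0;
        HashCombine(&hash, pair.first);
        HashCombine(&hash, pair.second);
        return hash;
    }
};

int main() {
    std::unordered_map<LayoutEntryPointPair, int, LayoutEntryPointPairHashFunc> cache;
    int layout = 0;  // placeholder object standing in for a pipeline layout
    cache[{&layout, "vs_main"}] = 1;
    cache[{&layout, "fs_main"}] = 2;
    std::cout << cache.size() << "\n";  // prints 2
    return 0;
}
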
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
index 2e8c3af9499..b5d949f10e2 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
@@ -37,6 +37,7 @@ namespace tint {
class Program;
namespace transform {
+ class DataMap;
class Transform;
class VertexPulling;
} // namespace transform
@@ -51,10 +52,18 @@ namespace dawn_native {
struct EntryPointMetadata;
+ using PipelineLayoutEntryPointPair = std::pair<PipelineLayoutBase*, std::string>;
+ struct PipelineLayoutEntryPointPairHashFunc {
+ size_t operator()(const PipelineLayoutEntryPointPair& pair) const;
+ };
+
// A map from name to EntryPointMetadata.
using EntryPointMetadataTable =
std::unordered_map<std::string, std::unique_ptr<EntryPointMetadata>>;
+ // Source for a tint program
+ class TintSource;
+
struct ShaderModuleParseResult {
ShaderModuleParseResult();
~ShaderModuleParseResult();
@@ -64,6 +73,7 @@ namespace dawn_native {
bool HasParsedShader() const;
std::unique_ptr<tint::Program> tintProgram;
+ std::unique_ptr<TintSource> tintSource;
std::vector<uint32_t> spirv;
std::unique_ptr<OwnedCompilationMessages> compilationMessages;
};
@@ -79,12 +89,15 @@ namespace dawn_native {
const PipelineLayoutBase* layout);
ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
const tint::Program* program,
+ const tint::transform::DataMap& inputs,
+ tint::transform::DataMap* outputs,
OwnedCompilationMessages* messages);
- std::unique_ptr<tint::transform::VertexPulling> MakeVertexPullingTransform(
- const VertexState& vertexState,
- const std::string& entryPoint,
- BindGroupIndex pullingBufferBindingSet);
+ /// Creates and adds the tint::transform::VertexPulling::Config to transformInputs.
+ void AddVertexPullingTransformConfig(const VertexState& vertexState,
+ const std::string& entryPoint,
+ BindGroupIndex pullingBufferBindingSet,
+ tint::transform::DataMap* transformInputs);
// Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
// stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
@@ -164,7 +177,7 @@ namespace dawn_native {
const std::string& entryPoint,
BindGroupIndex pullingBufferBindingSet) const;
- OwnedCompilationMessages* CompilationMessages() {
+ OwnedCompilationMessages* GetCompilationMessages() {
return mCompilationMessages.get();
}
@@ -190,6 +203,7 @@ namespace dawn_native {
EntryPointMetadataTable mEntryPoints;
std::vector<uint32_t> mSpirv;
std::unique_ptr<tint::Program> mTintProgram;
+ std::unique_ptr<TintSource> mTintSource; // Keep the tint::Source::File alive
std::unique_ptr<OwnedCompilationMessages> mCompilationMessages;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface.cpp b/chromium/third_party/dawn/src/dawn_native/Surface.cpp
index 4afe05ed454..d78f3f3cf3c 100644
--- a/chromium/third_party/dawn/src/dawn_native/Surface.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Surface.cpp
@@ -15,12 +15,13 @@
#include "dawn_native/Surface.h"
#include "common/Platform.h"
+#include "dawn_native/ChainUtils_autogen.h"
#include "dawn_native/Instance.h"
#include "dawn_native/SwapChain.h"
#if defined(DAWN_PLATFORM_WINDOWS)
-# include "common/windows_with_undefs.h"
-#endif // DAWN_PLATFORM_WINDOWS
+# include <windows.ui.core.h>
+#endif // defined(DAWN_PLATFORM_WINDOWS)
#if defined(DAWN_USE_X11)
# include "common/xlib_with_undefs.h"
@@ -34,75 +35,75 @@ namespace dawn_native {
MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
const SurfaceDescriptor* descriptor) {
- // TODO(cwallez@chromium.org): Have some type of helper to iterate over all the chained
-        // structures.
if (descriptor->nextInChain == nullptr) {
return DAWN_VALIDATION_ERROR("Surface cannot be created with just the base descriptor");
}
- const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
- if (chainedDescriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("Cannot specify two windows for a single surface");
- }
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+ wgpu::SType::SurfaceDescriptorFromMetalLayer,
+ wgpu::SType::SurfaceDescriptorFromWindowsHWND,
+ wgpu::SType::SurfaceDescriptorFromWindowsCoreWindow,
+ wgpu::SType::SurfaceDescriptorFromXlib));
- switch (chainedDescriptor->sType) {
#if defined(DAWN_ENABLE_BACKEND_METAL)
- case wgpu::SType::SurfaceDescriptorFromMetalLayer: {
- const SurfaceDescriptorFromMetalLayer* metalDesc =
- static_cast<const SurfaceDescriptorFromMetalLayer*>(chainedDescriptor);
-
- // Check that the layer is a CAMetalLayer (or a derived class).
- if (!InheritsFromCAMetalLayer(metalDesc->layer)) {
- return DAWN_VALIDATION_ERROR("layer must be a CAMetalLayer");
- }
- break;
- }
+ const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &metalDesc);
+ if (!metalDesc) {
+ return DAWN_VALIDATION_ERROR("Unsupported sType");
+ }
+ // Check that the layer is a CAMetalLayer (or a derived class).
+ if (!InheritsFromCAMetalLayer(metalDesc->layer)) {
+ return DAWN_VALIDATION_ERROR("layer must be a CAMetalLayer");
+ }
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
#if defined(DAWN_PLATFORM_WINDOWS)
- case wgpu::SType::SurfaceDescriptorFromWindowsHWND: {
- const SurfaceDescriptorFromWindowsHWND* hwndDesc =
- static_cast<const SurfaceDescriptorFromWindowsHWND*>(chainedDescriptor);
-
- // It is not possible to validate an HINSTANCE.
-
- // Validate the hwnd using the windows.h IsWindow function.
- if (IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0) {
- return DAWN_VALIDATION_ERROR("Invalid HWND");
- }
- break;
+# if defined(DAWN_PLATFORM_WIN32)
+ const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &hwndDesc);
+ if (hwndDesc) {
+ if (IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0) {
+ return DAWN_VALIDATION_ERROR("Invalid HWND");
}
+ return {};
+ }
+# endif // defined(DAWN_PLATFORM_WIN32)
+ const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &coreWindowDesc);
+ if (coreWindowDesc) {
+            // Validate the coreWindow by querying for the ICoreWindow interface.
+ ComPtr<ABI::Windows::UI::Core::ICoreWindow> coreWindow;
+ if (coreWindowDesc->coreWindow == nullptr ||
+ FAILED(static_cast<IUnknown*>(coreWindowDesc->coreWindow)
+ ->QueryInterface(IID_PPV_ARGS(&coreWindow)))) {
+ return DAWN_VALIDATION_ERROR("Invalid CoreWindow");
+ }
+ return {};
+ }
+ return DAWN_VALIDATION_ERROR("Unsupported sType");
#endif // defined(DAWN_PLATFORM_WINDOWS)
#if defined(DAWN_USE_X11)
- case wgpu::SType::SurfaceDescriptorFromXlib: {
- const SurfaceDescriptorFromXlib* xDesc =
- static_cast<const SurfaceDescriptorFromXlib*>(chainedDescriptor);
-
- // It is not possible to validate an X Display.
-
- // Check the validity of the window by calling a getter function on the window that
- // returns a status code. If the window is bad the call return a status of zero. We
- // need to set a temporary X11 error handler while doing this because the default
- // X11 error handler exits the program on any error.
- XErrorHandler oldErrorHandler =
- XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
- XWindowAttributes attributes;
- int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display),
- xDesc->window, &attributes);
- XSetErrorHandler(oldErrorHandler);
-
- if (status == 0) {
- return DAWN_VALIDATION_ERROR("Invalid X Window");
- }
- break;
- }
-#endif // defined(DAWN_USE_X11)
-
- case wgpu::SType::SurfaceDescriptorFromCanvasHTMLSelector:
- default:
- return DAWN_VALIDATION_ERROR("Unsupported sType");
+ const SurfaceDescriptorFromXlib* xDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &xDesc);
+ if (!xDesc) {
+ return DAWN_VALIDATION_ERROR("Unsupported sType");
}
+ // Check the validity of the window by calling a getter function on the window that
+        // returns a status code. If the window is bad, the call returns a status of zero. We
+ // need to set a temporary X11 error handler while doing this because the default
+ // X11 error handler exits the program on any error.
+ XErrorHandler oldErrorHandler =
+ XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
+ XWindowAttributes attributes;
+ int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display),
+ xDesc->window, &attributes);
+ XSetErrorHandler(oldErrorHandler);
+
+ if (status == 0) {
+ return DAWN_VALIDATION_ERROR("Invalid X Window");
+ }
+#endif // defined(DAWN_USE_X11)
return {};
}
@@ -110,37 +111,33 @@ namespace dawn_native {
Surface::Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor)
: mInstance(instance) {
ASSERT(descriptor->nextInChain != nullptr);
- const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
-
- switch (chainedDescriptor->sType) {
- case wgpu::SType::SurfaceDescriptorFromMetalLayer: {
- const SurfaceDescriptorFromMetalLayer* metalDesc =
- static_cast<const SurfaceDescriptorFromMetalLayer*>(chainedDescriptor);
- mType = Type::MetalLayer;
- mMetalLayer = metalDesc->layer;
- break;
- }
-
- case wgpu::SType::SurfaceDescriptorFromWindowsHWND: {
- const SurfaceDescriptorFromWindowsHWND* hwndDesc =
- static_cast<const SurfaceDescriptorFromWindowsHWND*>(chainedDescriptor);
- mType = Type::WindowsHWND;
- mHInstance = hwndDesc->hinstance;
- mHWND = hwndDesc->hwnd;
- break;
- }
-
- case wgpu::SType::SurfaceDescriptorFromXlib: {
- const SurfaceDescriptorFromXlib* xDesc =
- static_cast<const SurfaceDescriptorFromXlib*>(chainedDescriptor);
- mType = Type::Xlib;
- mXDisplay = xDesc->display;
- mXWindow = xDesc->window;
- break;
- }
-
- default:
- UNREACHABLE();
+ const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+ const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+ const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+ const SurfaceDescriptorFromXlib* xDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &metalDesc);
+ FindInChain(descriptor->nextInChain, &hwndDesc);
+ FindInChain(descriptor->nextInChain, &coreWindowDesc);
+ FindInChain(descriptor->nextInChain, &xDesc);
+ ASSERT(metalDesc || hwndDesc || xDesc);
+ if (metalDesc) {
+ mType = Type::MetalLayer;
+ mMetalLayer = metalDesc->layer;
+ } else if (hwndDesc) {
+ mType = Type::WindowsHWND;
+ mHInstance = hwndDesc->hinstance;
+ mHWND = hwndDesc->hwnd;
+ } else if (coreWindowDesc) {
+#if defined(DAWN_PLATFORM_WINDOWS)
+ mType = Type::WindowsCoreWindow;
+ mCoreWindow = static_cast<IUnknown*>(coreWindowDesc->coreWindow);
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+ } else if (xDesc) {
+ mType = Type::Xlib;
+ mXDisplay = xDesc->display;
+ mXWindow = xDesc->window;
+ } else {
+ UNREACHABLE();
}
}
@@ -181,6 +178,15 @@ namespace dawn_native {
return mHWND;
}
+ IUnknown* Surface::GetCoreWindow() const {
+ ASSERT(mType == Type::WindowsCoreWindow);
+#if defined(DAWN_PLATFORM_WINDOWS)
+ return mCoreWindow.Get();
+#else
+ return nullptr;
+#endif
+ }
+
void* Surface::GetXDisplay() const {
ASSERT(mType == Type::Xlib);
return mXDisplay;
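
Surface validation and construction above both rely on the generated ChainUtils helpers: ValidateSingleSType rejects chains that contain more than one (or an unexpected) extension struct, and FindInChain walks the chain and returns a typed pointer to the matching link, or leaves it null. Below is a hand-written sketch of a FindInChain-style lookup; the struct layout and the per-type kSType constant are assumptions made for illustration, not the generated Dawn code.

#include <cassert>
#include <cstdint>

enum class SType : uint32_t { Invalid = 0, FromMetalLayer, FromWindowsHWND, FromXlib };

struct ChainedStruct {
    const ChainedStruct* nextInChain = nullptr;
    SType sType = SType::Invalid;
};

// Each extension struct advertises its own sType so the lookup can be done by type.
struct SurfaceDescriptorFromWindowsHWND : ChainedStruct {
    static constexpr SType kSType = SType::FromWindowsHWND;
    void* hwnd = nullptr;
};
struct SurfaceDescriptorFromXlib : ChainedStruct {
    static constexpr SType kSType = SType::FromXlib;
    uint32_t window = 0;
};

// Walk the chain and fill *out with the first struct whose sType matches T::kSType.
template <typename T>
void FindInChain(const ChainedStruct* chain, const T** out) {
    *out = nullptr;
    for (; chain != nullptr; chain = chain->nextInChain) {
        if (chain->sType == T::kSType) {
            *out = static_cast<const T*>(chain);
            return;
        }
    }
}

int main() {
    SurfaceDescriptorFromWindowsHWND hwndDesc;
    hwndDesc.sType = SurfaceDescriptorFromWindowsHWND::kSType;

    const SurfaceDescriptorFromWindowsHWND* found = nullptr;
    const SurfaceDescriptorFromXlib* notFound = nullptr;
    FindInChain(&hwndDesc, &found);
    FindInChain(&hwndDesc, &notFound);
    assert(found == &hwndDesc);
    assert(notFound == nullptr);
    return 0;
}
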
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface.h b/chromium/third_party/dawn/src/dawn_native/Surface.h
index 5863109024c..bdab2b45bc3 100644
--- a/chromium/third_party/dawn/src/dawn_native/Surface.h
+++ b/chromium/third_party/dawn/src/dawn_native/Surface.h
@@ -21,6 +21,17 @@
#include "dawn_native/dawn_platform.h"
+#include "common/Platform.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# include "dawn_native/d3d12/d3d12_platform.h"
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+// Forward declare IUnknown
+// GetCoreWindow needs to return an IUnknown pointer
+// non-windows platforms don't have this type
+struct IUnknown;
+
namespace dawn_native {
MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
@@ -39,7 +50,7 @@ namespace dawn_native {
NewSwapChainBase* GetAttachedSwapChain();
// These are valid to call on all Surfaces.
- enum class Type { MetalLayer, WindowsHWND, Xlib };
+ enum class Type { MetalLayer, WindowsHWND, WindowsCoreWindow, Xlib };
Type GetType() const;
InstanceBase* GetInstance();
@@ -50,6 +61,9 @@ namespace dawn_native {
void* GetHInstance() const;
void* GetHWND() const;
+ // Valid to call if the type is WindowsCoreWindow
+ IUnknown* GetCoreWindow() const;
+
// Valid to call if the type is WindowsXlib
void* GetXDisplay() const;
uint32_t GetXWindow() const;
@@ -70,6 +84,11 @@ namespace dawn_native {
void* mHInstance = nullptr;
void* mHWND = nullptr;
+#if defined(DAWN_PLATFORM_WINDOWS)
+ // WindowsCoreWindow
+ ComPtr<IUnknown> mCoreWindow;
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
// Xlib
void* mXDisplay = nullptr;
uint32_t mXWindow = 0;
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.cpp b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
index 55eb68fb1f1..0903e629575 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
@@ -248,26 +248,6 @@ namespace dawn_native {
} // anonymous namespace
- MaybeError FixUpDeprecatedGPUExtent3DDepth(DeviceBase* device, Extent3D* extent) {
- if (extent->depth != 1) {
- // deprecated depth is assigned
- if (extent->depthOrArrayLayers != 1) {
- // both deprecated and updated API is used
- return DAWN_VALIDATION_ERROR(
- "Deprecated GPUExtent3D.depth and updated GPUExtent3D.depthOrArrayLengths are "
- "both assigned.");
- }
-
- extent->depthOrArrayLayers = extent->depth;
-
- device->EmitDeprecationWarning(
- "GPUExtent3D.depth is deprecated. Please use GPUExtent3D.depthOrArrayLayers "
- "instead.");
- }
-
- return {};
- }
-
MaybeError ValidateTextureDescriptor(const DeviceBase* device,
const TextureDescriptor* descriptor) {
if (descriptor->nextInChain != nullptr) {
@@ -301,12 +281,20 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Compressed texture must be 2D");
}
+        // Depth/stencil formats are only valid for 2D textures: Metal has this limit, and D3D12
+        // doesn't support depth/stencil formats on 3D textures.
+ if (descriptor->dimension != wgpu::TextureDimension::e2D &&
+ (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
+ return DAWN_VALIDATION_ERROR("Depth/stencil formats are valid for 2D textures only");
+ }
+
DAWN_TRY(ValidateTextureSize(descriptor, format));
return {};
}
- MaybeError ValidateTextureViewDescriptor(const TextureBase* texture,
+ MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+ const TextureBase* texture,
const TextureViewDescriptor* descriptor) {
if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
@@ -324,6 +312,13 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("1D texture views aren't supported (yet).");
}
+ // Disallow 3D views as unsafe until they are fully implemented.
+ if (descriptor->dimension == wgpu::TextureViewDimension::e3D &&
+ device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+ return DAWN_VALIDATION_ERROR(
+ "3D views are disallowed because they are not fully implemented");
+ }
+
DAWN_TRY(ValidateTextureFormat(descriptor->format));
DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
@@ -426,7 +421,7 @@ namespace dawn_native {
mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
// Add readonly storage usage if the texture has a storage usage. The validation rules in
- // ValidatePassResourceUsage will make sure we don't use both at the same time.
+ // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
if (mUsage & wgpu::TextureUsage::Storage) {
mUsage |= kReadOnlyStorageTexture;
}
@@ -611,7 +606,13 @@ namespace dawn_native {
}
TextureViewBase* TextureBase::APICreateView(const TextureViewDescriptor* descriptor) {
- return GetDevice()->CreateTextureView(this, descriptor);
+ DeviceBase* device = GetDevice();
+
+ Ref<TextureViewBase> result;
+ if (device->ConsumedError(device->CreateTextureView(this, descriptor), &result)) {
+ return TextureViewBase::MakeError(device);
+ }
+ return result.Detach();
}
void TextureBase::APIDestroy() {
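
APICreateView above now routes through the fallible CreateTextureView path and, on failure, consumes the error on the device and hands back an "error object" rather than a null pointer, so callers always receive a usable handle. The sketch below imitates that shape with deliberately simplified stand-ins (a std::pair instead of ResultOrError, a string instead of Dawn's error type); it is not the Dawn API.

#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <utility>

struct TextureView {
    bool isError = false;
    static std::unique_ptr<TextureView> MakeError() {
        auto view = std::make_unique<TextureView>();
        view->isError = true;
        return view;
    }
};

struct Device {
    std::optional<std::string> lastError;

    // Fallible internal path: returns a view or an error message.
    std::pair<std::unique_ptr<TextureView>, std::optional<std::string>> CreateTextureView(bool valid) {
        if (!valid) {
            return {nullptr, "invalid texture view descriptor"};
        }
        return {std::make_unique<TextureView>(), std::nullopt};
    }

    // Entry point: never returns nullptr; errors become error objects the caller can still hold.
    std::unique_ptr<TextureView> APICreateView(bool valid) {
        auto [view, error] = CreateTextureView(valid);
        if (error) {
            lastError = error;  // "consume" the error on the device
            return TextureView::MakeError();
        }
        return std::move(view);
    }
};

int main() {
    Device device;
    auto good = device.APICreateView(true);
    auto bad = device.APICreateView(false);
    std::cout << good->isError << " " << bad->isError << "\n";  // prints 0 1
    return 0;
}
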
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.h b/chromium/third_party/dawn/src/dawn_native/Texture.h
index 7ae39463f65..a803ddadb12 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.h
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.h
@@ -30,7 +30,8 @@ namespace dawn_native {
MaybeError ValidateTextureDescriptor(const DeviceBase* device,
const TextureDescriptor* descriptor);
- MaybeError ValidateTextureViewDescriptor(const TextureBase* texture,
+ MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+ const TextureBase* texture,
const TextureViewDescriptor* descriptor);
TextureViewDescriptor GetTextureViewDescriptorWithDefaults(
const TextureBase* texture,
@@ -38,8 +39,6 @@ namespace dawn_native {
bool IsValidSampleCount(uint32_t sampleCount);
- MaybeError FixUpDeprecatedGPUExtent3DDepth(DeviceBase* device, Extent3D* extent);
-
static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::Sampled | kReadOnlyStorageTexture;
diff --git a/chromium/third_party/dawn/src/dawn_native/ToBackend.h b/chromium/third_party/dawn/src/dawn_native/ToBackend.h
index 3cc071580c2..5b0f049894b 100644
--- a/chromium/third_party/dawn/src/dawn_native/ToBackend.h
+++ b/chromium/third_party/dawn/src/dawn_native/ToBackend.h
@@ -127,6 +127,12 @@ namespace dawn_native {
}
template <typename BackendTraits, typename T>
+ Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&& ToBackendBase(Ref<T>&& common) {
+ return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&&>(
+ common);
+ }
+
+ template <typename BackendTraits, typename T>
const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(
const Ref<T>& common) {
return reinterpret_cast<
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
index 1455d129383..89adf699ed9 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
@@ -52,6 +52,10 @@ namespace dawn_native { namespace d3d12 {
return mD3d12Device;
}
+ const gpu_info::D3DDriverVersion& Adapter::GetDriverVersion() const {
+ return mDriverVersion;
+ }
+
MaybeError Adapter::Initialize() {
// D3D12 cannot check for feature support without a device.
// Create the device to populate the adapter properties then reuse it when needed for actual
@@ -88,10 +92,10 @@ namespace dawn_native { namespace d3d12 {
std::ostringstream o;
o << "D3D12 driver version ";
- o << ((encodedVersion >> 48) & 0xFFFF) << ".";
- o << ((encodedVersion >> 32) & 0xFFFF) << ".";
- o << ((encodedVersion >> 16) & 0xFFFF) << ".";
- o << (encodedVersion & 0xFFFF);
+ for (size_t i = 0; i < mDriverVersion.size(); ++i) {
+ mDriverVersion[i] = (encodedVersion >> (48 - 16 * i)) & 0xFFFF;
+ o << mDriverVersion[i] << ".";
+ }
mDriverDescription = o.str();
}
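
The rewritten loop above decodes the 64-bit UMD driver version reported by DXGI into four 16-bit components (most significant first) while also building the human-readable string; as written it leaves a trailing '.' after the last component. The sketch below performs the same decode and joins the parts without the trailing dot; D3DDriverVersion is assumed to be a std::array<uint16_t, 4> as declared in common/GPUInfo.h.

#include <array>
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

using D3DDriverVersion = std::array<uint16_t, 4>;  // mirrors gpu_info::D3DDriverVersion

// Decode a packed 64-bit UMD driver version into its four 16-bit components,
// most significant component first.
D3DDriverVersion DecodeDriverVersion(uint64_t encodedVersion) {
    D3DDriverVersion version;
    for (size_t i = 0; i < version.size(); ++i) {
        version[i] = static_cast<uint16_t>((encodedVersion >> (48 - 16 * i)) & 0xFFFF);
    }
    return version;
}

std::string FormatDriverVersion(const D3DDriverVersion& version) {
    std::ostringstream o;
    for (size_t i = 0; i < version.size(); ++i) {
        if (i > 0) {
            o << ".";
        }
        o << version[i];
    }
    return o.str();
}

int main() {
    // 27.21.14.5671 packed into one 64-bit value, as reported by the driver.
    uint64_t encoded = (uint64_t(27) << 48) | (uint64_t(21) << 32) | (uint64_t(14) << 16) | 5671;
    std::cout << FormatDriverVersion(DecodeDriverVersion(encoded)) << "\n";  // 27.21.14.5671
    return 0;
}
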
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
index 250a7012733..f41b3a1d372 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
@@ -17,6 +17,7 @@
#include "dawn_native/Adapter.h"
+#include "common/GPUInfo.h"
#include "dawn_native/d3d12/D3D12Info.h"
#include "dawn_native/d3d12/d3d12_platform.h"
@@ -33,6 +34,7 @@ namespace dawn_native { namespace d3d12 {
IDXGIAdapter3* GetHardwareAdapter() const;
Backend* GetBackend() const;
ComPtr<ID3D12Device> GetDevice() const;
+ const gpu_info::D3DDriverVersion& GetDriverVersion() const;
MaybeError Initialize();
@@ -46,6 +48,7 @@ namespace dawn_native { namespace d3d12 {
ComPtr<IDXGIAdapter3> mHardwareAdapter;
ComPtr<ID3D12Device> mD3d12Device;
+ gpu_info::D3DDriverVersion mDriverVersion;
Backend* mBackend;
D3D12DeviceInfo mDeviceInfo = {};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
index 57548c7ef81..3e5c15b5b84 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
@@ -39,10 +39,8 @@ namespace dawn_native { namespace d3d12 {
functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
ASSERT(debugController != nullptr);
debugController->EnableDebugLayer();
- debugController->SetEnableGPUBasedValidation(true);
- if (validationLevel == BackendValidationLevel::Partial) {
- debugController->SetGPUBasedValidationFlags(
- D3D12_GPU_BASED_VALIDATION_FLAGS_DISABLE_STATE_TRACKING);
+ if (validationLevel == BackendValidationLevel::Full) {
+ debugController->SetEnableGPUBasedValidation(true);
}
// Enable additional debug layers.
@@ -52,7 +50,8 @@ namespace dawn_native { namespace d3d12 {
if (beginCaptureOnStartup) {
ComPtr<IDXGraphicsAnalysis> graphicsAnalysis;
- if (SUCCEEDED(functions->dxgiGetDebugInterface1(
+ if (functions->dxgiGetDebugInterface1 != nullptr &&
+ SUCCEEDED(functions->dxgiGetDebugInterface1(
0, IID_PPV_ARGS(&graphicsAnalysis)))) {
graphicsAnalysis->BeginCapture();
}
@@ -102,34 +101,49 @@ namespace dawn_native { namespace d3d12 {
return mFactory;
}
- ResultOrError<IDxcLibrary*> Backend::GetOrCreateDxcLibrary() {
+ MaybeError Backend::EnsureDxcLibrary() {
if (mDxcLibrary == nullptr) {
DAWN_TRY(CheckHRESULT(
mFunctions->dxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&mDxcLibrary)),
"DXC create library"));
ASSERT(mDxcLibrary != nullptr);
}
- return mDxcLibrary.Get();
+ return {};
}
- ResultOrError<IDxcCompiler*> Backend::GetOrCreateDxcCompiler() {
+ MaybeError Backend::EnsureDxcCompiler() {
if (mDxcCompiler == nullptr) {
DAWN_TRY(CheckHRESULT(
mFunctions->dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&mDxcCompiler)),
"DXC create compiler"));
ASSERT(mDxcCompiler != nullptr);
}
- return mDxcCompiler.Get();
+ return {};
}
- ResultOrError<IDxcValidator*> Backend::GetOrCreateDxcValidator() {
+ MaybeError Backend::EnsureDxcValidator() {
if (mDxcValidator == nullptr) {
DAWN_TRY(CheckHRESULT(
mFunctions->dxcCreateInstance(CLSID_DxcValidator, IID_PPV_ARGS(&mDxcValidator)),
"DXC create validator"));
ASSERT(mDxcValidator != nullptr);
}
- return mDxcValidator.Get();
+ return {};
+ }
+
+ ComPtr<IDxcLibrary> Backend::GetDxcLibrary() const {
+ ASSERT(mDxcLibrary != nullptr);
+ return mDxcLibrary;
+ }
+
+ ComPtr<IDxcCompiler> Backend::GetDxcCompiler() const {
+ ASSERT(mDxcCompiler != nullptr);
+ return mDxcCompiler;
+ }
+
+ ComPtr<IDxcValidator> Backend::GetDxcValidator() const {
+ ASSERT(mDxcValidator != nullptr);
+ return mDxcValidator;
}
const PlatformFunctions* Backend::GetFunctions() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
index 0490b601750..17f77ccec35 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
@@ -30,9 +30,14 @@ namespace dawn_native { namespace d3d12 {
MaybeError Initialize();
ComPtr<IDXGIFactory4> GetFactory() const;
- ResultOrError<IDxcLibrary*> GetOrCreateDxcLibrary();
- ResultOrError<IDxcCompiler*> GetOrCreateDxcCompiler();
- ResultOrError<IDxcValidator*> GetOrCreateDxcValidator();
+
+ MaybeError EnsureDxcLibrary();
+ MaybeError EnsureDxcCompiler();
+ MaybeError EnsureDxcValidator();
+ ComPtr<IDxcLibrary> GetDxcLibrary() const;
+ ComPtr<IDxcCompiler> GetDxcCompiler() const;
+ ComPtr<IDxcValidator> GetDxcValidator() const;
+
const PlatformFunctions* GetFunctions() const;
std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override;
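
The DXC accessors above were split so that a fallible, idempotent EnsureDxc*() performs the lazy creation once, while the const GetDxc*() getters simply assert and return, letting callers do error handling up front and then use the getters freely. A minimal sketch of that split, with a plain struct standing in for the COM interfaces and a bool standing in for MaybeError:

#include <cassert>
#include <memory>
#include <string>

// Stand-in for a COM interface such as IDxcCompiler.
struct Compiler {
    std::string name = "dxc";
};

// Pretend factory; the real code calls dxcCreateInstance and checks its HRESULT.
std::unique_ptr<Compiler> CreateCompiler(bool simulateFailure) {
    return simulateFailure ? nullptr : std::make_unique<Compiler>();
}

class Backend {
  public:
    // Fallible and idempotent: creates the compiler on first use.
    bool EnsureCompiler(bool simulateFailure = false) {
        if (mCompiler == nullptr) {
            mCompiler = CreateCompiler(simulateFailure);
        }
        return mCompiler != nullptr;
    }

    // Infallible: only valid to call after a successful EnsureCompiler().
    Compiler* GetCompiler() const {
        assert(mCompiler != nullptr);
        return mCompiler.get();
    }

  private:
    std::unique_ptr<Compiler> mCompiler;
};

int main() {
    Backend backend;
    if (backend.EnsureCompiler()) {
        assert(backend.GetCompiler()->name == "dxc");
    }
    return 0;
}
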
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
index 8d0056adf6b..b0b8e463ae8 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
@@ -139,10 +139,10 @@ namespace dawn_native { namespace d3d12 {
return false;
}
- void RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
- const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize) {
+ MaybeError RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize) {
ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
ASSERT(srcCopy.aspect == dstCopy.aspect);
dawn_native::Format format = srcCopy.texture->GetFormat();
@@ -166,9 +166,9 @@ namespace dawn_native { namespace d3d12 {
tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
tempBufferDescriptor.size = tempBufferSize.AcquireSuccess();
Device* device = ToBackend(srcCopy.texture->GetDevice());
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<Buffer> tempBuffer =
- AcquireRef(ToBackend(device->APICreateBuffer(&tempBufferDescriptor)));
+ Ref<BufferBase> tempBufferBase;
+ DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
+ Ref<Buffer> tempBuffer = ToBackend(std::move(tempBufferBase));
// Copy from source texture into tempBuffer
Texture* srcTexture = ToBackend(srcCopy.texture).Get();
@@ -190,7 +190,65 @@ namespace dawn_native { namespace d3d12 {
// Save tempBuffer into recordingContext
recordingContext->AddToTempBuffers(std::move(tempBuffer));
+
+ return {};
}
+
+ // Records the necessary barriers for a synchronization scope using the resource usage
+ // data pre-computed in the frontend. Also performs lazy initialization if required.
+        // Returns whether any UAVs are used in the synchronization scope.
+ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
+ const SyncScopeResourceUsage& usages) {
+ std::vector<D3D12_RESOURCE_BARRIER> barriers;
+
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+ wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
+
+ for (size_t i = 0; i < usages.buffers.size(); ++i) {
+ Buffer* buffer = ToBackend(usages.buffers[i]);
+
+ // TODO(jiawei.shao@intel.com): clear storage buffers with
+ // ClearUnorderedAccessView*().
+ buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
+
+ D3D12_RESOURCE_BARRIER barrier;
+ if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
+ usages.bufferUsages[i])) {
+ barriers.push_back(barrier);
+ }
+ bufferUsages |= usages.bufferUsages[i];
+ }
+
+ wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
+
+ for (size_t i = 0; i < usages.textures.size(); ++i) {
+ Texture* texture = ToBackend(usages.textures[i]);
+
+ // Clear subresources that are not render attachments. Render attachments will be
+ // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+ // subresource has not been initialized before the render pass.
+ usages.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
+ }
+ textureUsages |= usage;
+ });
+
+ ToBackend(usages.textures[i])
+ ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
+ usages.textureUsages[i]);
+ }
+
+ if (barriers.size()) {
+ commandList->ResourceBarrier(barriers.size(), barriers.data());
+ }
+
+ return (bufferUsages & wgpu::BufferUsage::Storage ||
+ textureUsages & wgpu::TextureUsage::Storage);
+ }
+
} // anonymous namespace
class BindGroupStateTracker : public BindGroupTrackerBase<false, uint64_t> {
@@ -272,81 +330,6 @@ namespace dawn_native { namespace d3d12 {
mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
}
- if (mInCompute) {
- std::vector<D3D12_RESOURCE_BARRIER> barriers;
- for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
- BindGroupLayoutBase* layout = mBindGroups[index]->GetLayout();
- for (BindingIndex binding{0}; binding < layout->GetBindingCount(); ++binding) {
- const BindingInfo& bindingInfo = layout->GetBindingInfo(binding);
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- D3D12_RESOURCE_BARRIER barrier;
- wgpu::BufferUsage usage;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- usage = wgpu::BufferUsage::Uniform;
- break;
- case wgpu::BufferBindingType::Storage:
- usage = wgpu::BufferUsage::Storage;
- break;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- usage = kReadOnlyStorageBuffer;
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- if (ToBackend(mBindGroups[index]
- ->GetBindingAsBufferBinding(binding)
- .buffer)
- ->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
- usage)) {
- barriers.push_back(barrier);
- }
- break;
- }
-
- case BindingInfoType::StorageTexture: {
- TextureViewBase* view =
- mBindGroups[index]->GetBindingAsTextureView(binding);
- wgpu::TextureUsage usage;
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::ReadOnly:
- usage = kReadOnlyStorageTexture;
- break;
- case wgpu::StorageTextureAccess::WriteOnly:
- usage = wgpu::TextureUsage::Storage;
- break;
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
- }
- ToBackend(view->GetTexture())
- ->TransitionUsageAndGetResourceBarrier(
- commandContext, &barriers, usage,
- view->GetSubresourceRange());
- break;
- }
-
- case BindingInfoType::Texture: {
- TextureViewBase* view =
- mBindGroups[index]->GetBindingAsTextureView(binding);
- ToBackend(view->GetTexture())
- ->TransitionUsageAndGetResourceBarrier(
- commandContext, &barriers, wgpu::TextureUsage::Sampled,
- view->GetSubresourceRange());
- break;
- }
-
- case BindingInfoType::Sampler:
- // Don't require barriers.
- break;
- }
- }
- }
-
- if (!barriers.empty()) {
- commandList->ResourceBarrier(barriers.size(), barriers.data());
- }
- }
DidApply();
return {};
@@ -608,80 +591,8 @@ namespace dawn_native { namespace d3d12 {
// actual command list but here is ok because there should be few command buffers.
bindingTracker.SetID3D12DescriptorHeaps(commandList);
- // Records the necessary barriers for the resource usage pre-computed by the frontend
- auto PrepareResourcesForRenderPass = [](CommandRecordingContext* commandContext,
- const PassResourceUsage& usages) -> bool {
- std::vector<D3D12_RESOURCE_BARRIER> barriers;
-
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
-
- for (size_t i = 0; i < usages.buffers.size(); ++i) {
- Buffer* buffer = ToBackend(usages.buffers[i]);
-
- // TODO(jiawei.shao@intel.com): clear storage buffers with
- // ClearUnorderedAccessView*().
- buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
-
- D3D12_RESOURCE_BARRIER barrier;
- if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
- usages.bufferUsages[i])) {
- barriers.push_back(barrier);
- }
- bufferUsages |= usages.bufferUsages[i];
- }
-
- wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
-
- for (size_t i = 0; i < usages.textures.size(); ++i) {
- Texture* texture = ToBackend(usages.textures[i]);
-
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- usages.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
- }
- textureUsages |= usage;
- });
-
- ToBackend(usages.textures[i])
- ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
- usages.textureUsages[i]);
- }
-
- if (barriers.size()) {
- commandList->ResourceBarrier(barriers.size(), barriers.data());
- }
-
- return (bufferUsages & wgpu::BufferUsage::Storage ||
- textureUsages & wgpu::TextureUsage::Storage);
- };
-
- // TODO(jiawei.shao@intel.com): move the resource lazy clearing inside the barrier tracking
- // for compute passes.
- auto PrepareResourcesForComputePass = [](CommandRecordingContext* commandContext,
- const PassResourceUsage& usages) -> void {
- for (size_t i = 0; i < usages.buffers.size(); ++i) {
- Buffer* buffer = ToBackend(usages.buffers[i]);
-
- // TODO(jiawei.shao@intel.com): clear storage buffers with
- // ClearUnorderedAccessView*().
- buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
- }
-
- for (size_t i = 0; i < usages.textures.size(); ++i) {
- Texture* texture = ToBackend(usages.textures[i]);
- texture->EnsureSubresourceContentInitialized(commandContext,
- texture->GetAllSubresources());
- }
- };
-
- const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
- uint32_t nextPassNumber = 0;
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
Command type;
while (mCommands.NextCommandId(&type)) {
@@ -689,12 +600,12 @@ namespace dawn_native { namespace d3d12 {
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
- PrepareResourcesForComputePass(commandContext,
- passResourceUsages[nextPassNumber]);
bindingTracker.SetInComputePass(true);
- DAWN_TRY(RecordComputePass(commandContext, &bindingTracker));
+ DAWN_TRY(RecordComputePass(
+ commandContext, &bindingTracker,
+ GetResourceUsages().computePasses[nextComputePassNumber]));
- nextPassNumber++;
+ nextComputePassNumber++;
break;
}
@@ -702,15 +613,15 @@ namespace dawn_native { namespace d3d12 {
BeginRenderPassCmd* beginRenderPassCmd =
mCommands.NextCommand<BeginRenderPassCmd>();
- const bool passHasUAV = PrepareResourcesForRenderPass(
- commandContext, passResourceUsages[nextPassNumber]);
+ const bool passHasUAV = TransitionAndClearForSyncScope(
+ commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
bindingTracker.SetInComputePass(false);
LazyClearRenderPassAttachments(beginRenderPassCmd);
DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, beginRenderPassCmd,
passHasUAV));
- nextPassNumber++;
+ nextRenderPassNumber++;
break;
}
@@ -824,8 +735,8 @@ namespace dawn_native { namespace d3d12 {
ASSERT(srcRange.aspects == dstRange.aspects);
if (ShouldCopyUsingTemporaryBuffer(GetDevice(), copy->source,
copy->destination)) {
- RecordCopyTextureWithTemporaryBuffer(commandContext, copy->source,
- copy->destination, copy->copySize);
+ DAWN_TRY(RecordCopyTextureWithTemporaryBuffer(
+ commandContext, copy->source, copy->destination, copy->copySize));
break;
}
@@ -840,17 +751,17 @@ namespace dawn_native { namespace d3d12 {
copy->copySize.width, copy->copySize.height, 1u};
for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
- for (uint32_t slice = 0; slice < copy->copySize.depthOrArrayLayers;
- ++slice) {
+ for (uint32_t layer = 0; layer < copy->copySize.depthOrArrayLayers;
+ ++layer) {
D3D12_TEXTURE_COPY_LOCATION srcLocation =
ComputeTextureCopyLocationForTexture(
source, copy->source.mipLevel,
- copy->source.origin.z + slice, aspect);
+ copy->source.origin.z + layer, aspect);
D3D12_TEXTURE_COPY_LOCATION dstLocation =
ComputeTextureCopyLocationForTexture(
destination, copy->destination.mipLevel,
- copy->destination.origin.z + slice, aspect);
+ copy->destination.origin.z + layer, aspect);
Origin3D sourceOriginInSubresource = copy->source.origin;
sourceOriginInSubresource.z = 0;
@@ -943,7 +854,9 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker) {
+ BindGroupStateTracker* bindingTracker,
+ const ComputePassResourceUsage& resourceUsages) {
+ uint64_t currentDispatch = 0;
PipelineLayout* lastLayout = nullptr;
ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
@@ -953,21 +866,28 @@ namespace dawn_native { namespace d3d12 {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+ TransitionAndClearForSyncScope(commandContext,
+ resourceUsages.dispatchUsages[currentDispatch]);
DAWN_TRY(bindingTracker->Apply(commandContext));
+
commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
+ currentDispatch++;
break;
}
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+ Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
+ TransitionAndClearForSyncScope(commandContext,
+ resourceUsages.dispatchUsages[currentDispatch]);
DAWN_TRY(bindingTracker->Apply(commandContext));
- Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
- buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::Indirect);
+
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDispatchIndirectSignature();
commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
dispatch->indirectOffset, nullptr, 0);
+ currentDispatch++;
break;
}
@@ -1450,8 +1370,8 @@ namespace dawn_native { namespace d3d12 {
break;
}
- case Command::SetBlendColor: {
- SetBlendColorCmd* cmd = mCommands.NextCommand<SetBlendColorCmd>();
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
const std::array<float, 4> color = ConvertToFloatColor(cmd->color);
commandList->OMSetBlendFactor(color.data());
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
index 342f851a998..51dc52728bc 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
@@ -39,7 +39,8 @@ namespace dawn_native { namespace d3d12 {
CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
MaybeError RecordComputePass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker);
+ BindGroupStateTracker* bindingTracker,
+ const ComputePassResourceUsage& resourceUsages);
MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker,
BeginRenderPassCmd* renderPass,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
index 5fc2e24ec4c..a363757f029 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
@@ -84,9 +84,16 @@ namespace dawn_native { namespace d3d12 {
textureDescriptor.mipLevelCount = mMipLevelCount;
textureDescriptor.sampleCount = mSampleCount;
+ // Set the release key to acquire key + 1 if not set. This allows supporting the old keyed
+ // mutex protocol during the transition to making this a required parameter.
+ ExternalMutexSerial releaseMutexKey =
+ (descriptor->releaseMutexKey != UINT64_MAX)
+ ? ExternalMutexSerial(descriptor->releaseMutexKey)
+ : ExternalMutexSerial(descriptor->acquireMutexKey + 1);
+
Ref<TextureBase> texture = backendDevice->CreateExternalTexture(
&textureDescriptor, mD3D12Resource, ExternalMutexSerial(descriptor->acquireMutexKey),
- descriptor->isSwapChainTexture, descriptor->isInitialized);
+ releaseMutexKey, descriptor->isSwapChainTexture, descriptor->isInitialized);
return reinterpret_cast<WGPUTexture>(texture.Detach());
}
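
For illustration, a minimal standalone sketch of the fallback rule used above (the function name is hypothetical; only the arithmetic mirrors the change): an unset releaseMutexKey, signalled by UINT64_MAX, defaults to acquireMutexKey + 1, which is what the old keyed-mutex protocol implicitly released with.

    #include <cassert>
    #include <cstdint>

    // Hypothetical helper mirroring the fallback above: an unset release key
    // (UINT64_MAX) defaults to acquireKey + 1, the value the old keyed-mutex
    // protocol always released with.
    uint64_t ResolveReleaseMutexKey(uint64_t acquireKey, uint64_t releaseKey) {
        return (releaseKey != UINT64_MAX) ? releaseKey : acquireKey + 1;
    }

    int main() {
        assert(ResolveReleaseMutexKey(3, UINT64_MAX) == 4);  // legacy behaviour
        assert(ResolveReleaseMutexKey(3, 7) == 7);           // explicit key wins
    }
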
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
index 4de76548b71..24ca1af1c6b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
@@ -160,7 +160,7 @@ namespace dawn_native { namespace d3d12 {
// The environment can only use DXC when it's available. Override the decision if it is not
// applicable.
- ApplyUseDxcToggle();
+ DAWN_TRY(ApplyUseDxcToggle());
return {};
}
@@ -196,25 +196,33 @@ namespace dawn_native { namespace d3d12 {
return ToBackend(GetAdapter())->GetBackend()->GetFactory();
}
- void Device::ApplyUseDxcToggle() {
+ MaybeError Device::ApplyUseDxcToggle() {
if (!ToBackend(GetAdapter())->GetBackend()->GetFunctions()->IsDXCAvailable()) {
ForceSetToggle(Toggle::UseDXC, false);
} else if (IsExtensionEnabled(Extension::ShaderFloat16)) {
// Currently we can only use DXC to compile HLSL shaders using float16.
ForceSetToggle(Toggle::UseDXC, true);
}
+
+ if (IsToggleEnabled(Toggle::UseDXC)) {
+ DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcCompiler());
+ DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcLibrary());
+ DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcValidator());
+ }
+
+ return {};
}
- ResultOrError<IDxcLibrary*> Device::GetOrCreateDxcLibrary() const {
- return ToBackend(GetAdapter())->GetBackend()->GetOrCreateDxcLibrary();
+ ComPtr<IDxcLibrary> Device::GetDxcLibrary() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetDxcLibrary();
}
- ResultOrError<IDxcCompiler*> Device::GetOrCreateDxcCompiler() const {
- return ToBackend(GetAdapter())->GetBackend()->GetOrCreateDxcCompiler();
+ ComPtr<IDxcCompiler> Device::GetDxcCompiler() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetDxcCompiler();
}
- ResultOrError<IDxcValidator*> Device::GetOrCreateDxcValidator() const {
- return ToBackend(GetAdapter())->GetBackend()->GetOrCreateDxcValidator();
+ ComPtr<IDxcValidator> Device::GetDxcValidator() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetDxcValidator();
}
const PlatformFunctions* Device::GetFunctions() const {
@@ -339,13 +347,13 @@ namespace dawn_native { namespace d3d12 {
}
ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
- return SwapChain::Create(this, descriptor);
+ return OldSwapChain::Create(this, descriptor);
}
ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) {
- return DAWN_VALIDATION_ERROR("New swapchains not implemented.");
+ return SwapChain::Create(this, surface, previousSwapChain, descriptor);
}
ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return Texture::Create(this, descriptor);
@@ -439,13 +447,14 @@ namespace dawn_native { namespace d3d12 {
Ref<TextureBase> Device::CreateExternalTexture(const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture,
ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
bool isSwapChainTexture,
bool isInitialized) {
Ref<Texture> dawnTexture;
- if (ConsumedError(
- Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
- acquireMutexKey, isSwapChainTexture, isInitialized),
- &dawnTexture)) {
+ if (ConsumedError(Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
+ acquireMutexKey, releaseMutexKey,
+ isSwapChainTexture, isInitialized),
+ &dawnTexture)) {
return nullptr;
}
return {dawnTexture};
@@ -540,14 +549,17 @@ namespace dawn_native { namespace d3d12 {
// Currently this workaround is only needed on Intel Gen9 and Gen9.5 GPUs.
// See http://crbug.com/1161355 for more information.
- // TODO(jiawei.shao@intel.com): disable this workaround on the newer drivers when the driver
- // bug is fixed.
if (gpu_info::IsIntel(pciInfo.vendorId) &&
(gpu_info::IsSkylake(pciInfo.deviceId) || gpu_info::IsKabylake(pciInfo.deviceId) ||
gpu_info::IsCoffeelake(pciInfo.deviceId))) {
- SetToggle(
- Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
- true);
+ constexpr gpu_info::D3DDriverVersion kFirstDriverVersionWithFix = {27, 20, 100, 9466};
+ if (gpu_info::CompareD3DDriverVersion(pciInfo.vendorId,
+ ToBackend(GetAdapter())->GetDriverVersion(),
+ kFirstDriverVersionWithFix) < 0) {
+ SetToggle(
+ Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+ true);
+ }
}
}
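
For illustration, a minimal standalone sketch of gating a workaround on a four-component D3D driver version, assuming component-wise (lexicographic) comparison; the real gpu_info::CompareD3DDriverVersion may differ, and kFirstDriverVersionWithFix is copied from the change above.

    #include <array>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using D3DDriverVersion = std::array<uint16_t, 4>;

    // Returns <0, 0 or >0 like strcmp, comparing components left to right.
    // Assumption: this mirrors the intent of gpu_info::CompareD3DDriverVersion
    // for Intel-style "27.20.100.9466" driver versions.
    int CompareDriverVersion(const D3DDriverVersion& a, const D3DDriverVersion& b) {
        for (std::size_t i = 0; i < a.size(); ++i) {
            if (a[i] != b[i]) {
                return a[i] < b[i] ? -1 : 1;
            }
        }
        return 0;
    }

    int main() {
        constexpr D3DDriverVersion kFirstDriverVersionWithFix = {27, 20, 100, 9466};
        const D3DDriverVersion current = {27, 20, 100, 9316};
        // Older than the first fixed driver: the copy workaround stays enabled.
        assert(CompareDriverVersion(current, kFirstDriverVersionWithFix) < 0);
    }
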
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
index 4819dd4f921..024bc503e76 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
@@ -65,9 +65,9 @@ namespace dawn_native { namespace d3d12 {
const PlatformFunctions* GetFunctions() const;
ComPtr<IDXGIFactory4> GetFactory() const;
- ResultOrError<IDxcLibrary*> GetOrCreateDxcLibrary() const;
- ResultOrError<IDxcCompiler*> GetOrCreateDxcCompiler() const;
- ResultOrError<IDxcValidator*> GetOrCreateDxcValidator() const;
+ ComPtr<IDxcLibrary> GetDxcLibrary() const;
+ ComPtr<IDxcCompiler> GetDxcCompiler() const;
+ ComPtr<IDxcValidator> GetDxcValidator() const;
ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
@@ -125,6 +125,7 @@ namespace dawn_native { namespace d3d12 {
Ref<TextureBase> CreateExternalTexture(const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture,
ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
bool isSwapChainTexture,
bool isInitialized);
ResultOrError<ComPtr<IDXGIKeyedMutex>> CreateKeyedMutexForTexture(
@@ -177,7 +178,7 @@ namespace dawn_native { namespace d3d12 {
MaybeError CheckDebugLayerAndGenerateErrors();
- void ApplyUseDxcToggle();
+ MaybeError ApplyUseDxcToggle();
ComPtr<ID3D12Fence> mFence;
HANDLE mFenceEvent = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
index 112250c9908..8e0654ea359 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
@@ -114,6 +114,15 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError PlatformFunctions::LoadD3D12() {
+#if DAWN_PLATFORM_WINUWP
+ d3d12CreateDevice = &D3D12CreateDevice;
+ d3d12GetDebugInterface = &D3D12GetDebugInterface;
+ d3d12SerializeRootSignature = &D3D12SerializeRootSignature;
+ d3d12CreateRootSignatureDeserializer = &D3D12CreateRootSignatureDeserializer;
+ d3d12SerializeVersionedRootSignature = &D3D12SerializeVersionedRootSignature;
+ d3d12CreateVersionedRootSignatureDeserializer =
+ &D3D12CreateVersionedRootSignatureDeserializer;
+#else
std::string error;
if (!mD3D12Lib.Open("d3d12.dll", &error) ||
!mD3D12Lib.GetProc(&d3d12CreateDevice, "D3D12CreateDevice", &error) ||
@@ -128,32 +137,55 @@ namespace dawn_native { namespace d3d12 {
"D3D12CreateVersionedRootSignatureDeserializer", &error)) {
return DAWN_INTERNAL_ERROR(error.c_str());
}
+#endif
return {};
}
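
For illustration, a self-contained sketch of the pattern introduced above: on UWP builds the function pointers are bound to statically linked entry points at compile time, while desktop builds keep resolving them from the DLL at runtime. All names here are hypothetical stand-ins; Dawn's real code binds D3D12CreateDevice and friends and uses DynamicLib::Open/GetProc for the dynamic branch.

    #include <cassert>
    #include <string>
    #include <unordered_map>

    // Hypothetical stand-ins: PFN_CREATE_DEVICE mirrors an exported entry point
    // such as D3D12CreateDevice; CreateDeviceImpl mirrors the symbol a UWP build
    // links in statically.
    using PFN_CREATE_DEVICE = int (*)();
    int CreateDeviceImpl() {
        return 42;
    }

    // Stand-in for DynamicLib::GetProc: resolves a name from a fake export table.
    PFN_CREATE_DEVICE GetProcFromFakeDll(const std::string& name) {
        static const std::unordered_map<std::string, PFN_CREATE_DEVICE> kExports = {
            {"CreateDevice", &CreateDeviceImpl}};
        auto it = kExports.find(name);
        return it != kExports.end() ? it->second : nullptr;
    }

    PFN_CREATE_DEVICE createDevice = nullptr;

    bool LoadFunctions(std::string* error) {
    #if defined(SKETCH_PLATFORM_WINUWP)  // analogous to DAWN_PLATFORM_WINUWP
        // UWP: the symbol is resolved at link time, so just take its address.
        createDevice = &CreateDeviceImpl;
    #else
        // Desktop: resolve the symbol from the library at runtime.
        createDevice = GetProcFromFakeDll("CreateDevice");
    #endif
        if (createDevice == nullptr) {
            *error = "could not resolve CreateDevice";
            return false;
        }
        return true;
    }

    int main() {
        std::string error;
        assert(LoadFunctions(&error));
        assert(createDevice() == 42);
    }
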
MaybeError PlatformFunctions::LoadD3D11() {
+#if DAWN_PLATFORM_WINUWP
+ d3d11on12CreateDevice = &D3D11On12CreateDevice;
+#else
std::string error;
if (!mD3D11Lib.Open("d3d11.dll", &error) ||
!mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
return DAWN_INTERNAL_ERROR(error.c_str());
}
+#endif
return {};
}
MaybeError PlatformFunctions::LoadDXGI() {
+#if DAWN_PLATFORM_WINUWP
+# if defined(_DEBUG)
+ // DXGIGetDebugInterface1 is tagged as a development-only capability,
+ // which implies that linking to this function will cause the application
+ // to fail Windows Store certification.
+ // But we need it when debugging with VS Graphics Diagnostics or PIX,
+ // so we only link to it in debug builds.
+ dxgiGetDebugInterface1 = &DXGIGetDebugInterface1;
+# endif
+ createDxgiFactory2 = &CreateDXGIFactory2;
+#else
std::string error;
if (!mDXGILib.Open("dxgi.dll", &error) ||
!mDXGILib.GetProc(&dxgiGetDebugInterface1, "DXGIGetDebugInterface1", &error) ||
!mDXGILib.GetProc(&createDxgiFactory2, "CreateDXGIFactory2", &error)) {
return DAWN_INTERNAL_ERROR(error.c_str());
}
+#endif
return {};
}
void PlatformFunctions::LoadDXCLibraries() {
+ // TODO(dawn:766)
+ // In UWP, dxcompiler.lib is statically linked; linking it currently makes the
+ // CoreApp unable to activate. LoadDXIL and LoadDXCompiler will fail in UWP,
+ // but LoadFunctions() can still be executed successfully.
+
const std::string& windowsSDKBasePath = GetWindowsSDKBasePath();
LoadDXIL(windowsSDKBasePath);
@@ -199,12 +231,15 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError PlatformFunctions::LoadFXCompiler() {
+#if DAWN_PLATFORM_WINUWP
+ d3dCompile = &D3DCompile;
+#else
std::string error;
if (!mFXCompilerLib.Open("d3dcompiler_47.dll", &error) ||
!mFXCompilerLib.GetProc(&d3dCompile, "D3DCompile", &error)) {
return DAWN_INTERNAL_ERROR(error.c_str());
}
-
+#endif
return {};
}
@@ -217,6 +252,11 @@ namespace dawn_native { namespace d3d12 {
}
void PlatformFunctions::LoadPIXRuntime() {
+ // TODO(dawn:766):
+ // In UWP, PIX should be statically linked against WinPixEventRuntime_UAP.lib,
+ // so we may want to add WinPixEventRuntime as a third-party package.
+ // Currently PIX is not loaded in UWP because the following
+ // mPIXEventRuntimeLib.Open call will fail.
if (!mPIXEventRuntimeLib.Open("WinPixEventRuntime.dll") ||
!mPIXEventRuntimeLib.GetProc(&pixBeginEventOnCommandList,
"PIXBeginEventOnCommandList") ||
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
index 3aea032deb1..75711958820 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
@@ -151,17 +151,17 @@ namespace dawn_native { namespace d3d12 {
return D3D12_BLEND_ZERO;
case wgpu::BlendFactor::One:
return D3D12_BLEND_ONE;
- case wgpu::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::Src:
return D3D12_BLEND_SRC_COLOR;
- case wgpu::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::OneMinusSrc:
return D3D12_BLEND_INV_SRC_COLOR;
case wgpu::BlendFactor::SrcAlpha:
return D3D12_BLEND_SRC_ALPHA;
case wgpu::BlendFactor::OneMinusSrcAlpha:
return D3D12_BLEND_INV_SRC_ALPHA;
- case wgpu::BlendFactor::DstColor:
+ case wgpu::BlendFactor::Dst:
return D3D12_BLEND_DEST_COLOR;
- case wgpu::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::OneMinusDst:
return D3D12_BLEND_INV_DEST_COLOR;
case wgpu::BlendFactor::DstAlpha:
return D3D12_BLEND_DEST_ALPHA;
@@ -169,10 +169,39 @@ namespace dawn_native { namespace d3d12 {
return D3D12_BLEND_INV_DEST_ALPHA;
case wgpu::BlendFactor::SrcAlphaSaturated:
return D3D12_BLEND_SRC_ALPHA_SAT;
- case wgpu::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::Constant:
return D3D12_BLEND_BLEND_FACTOR;
- case wgpu::BlendFactor::OneMinusBlendColor:
+ case wgpu::BlendFactor::OneMinusConstant:
return D3D12_BLEND_INV_BLEND_FACTOR;
+
+ // Deprecated blend factors should be normalized prior to this call.
+ case wgpu::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::DstColor:
+ case wgpu::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::OneMinusBlendColor:
+ UNREACHABLE();
+ }
+ }
+
+ // When a blend factor is defined for the alpha channel, any of the factors that don't
+ // explicitly state that they apply to alpha should be treated as their explicitly-alpha
+ // equivalents. See: https://github.com/gpuweb/gpuweb/issues/65
+ D3D12_BLEND D3D12AlphaBlend(wgpu::BlendFactor factor) {
+ switch (factor) {
+ case wgpu::BlendFactor::Src:
+ return D3D12_BLEND_SRC_ALPHA;
+ case wgpu::BlendFactor::OneMinusSrc:
+ return D3D12_BLEND_INV_SRC_ALPHA;
+ case wgpu::BlendFactor::Dst:
+ return D3D12_BLEND_DEST_ALPHA;
+ case wgpu::BlendFactor::OneMinusDst:
+ return D3D12_BLEND_INV_DEST_ALPHA;
+
+ // Other blend factors translate to the same D3D12 enum as the color blend factors.
+ default:
+ return D3D12Blend(factor);
}
}
@@ -214,8 +243,8 @@ namespace dawn_native { namespace d3d12 {
blendDesc.SrcBlend = D3D12Blend(state->blend->color.srcFactor);
blendDesc.DestBlend = D3D12Blend(state->blend->color.dstFactor);
blendDesc.BlendOp = D3D12BlendOperation(state->blend->color.operation);
- blendDesc.SrcBlendAlpha = D3D12Blend(state->blend->alpha.srcFactor);
- blendDesc.DestBlendAlpha = D3D12Blend(state->blend->alpha.dstFactor);
+ blendDesc.SrcBlendAlpha = D3D12AlphaBlend(state->blend->alpha.srcFactor);
+ blendDesc.DestBlendAlpha = D3D12AlphaBlend(state->blend->alpha.dstFactor);
blendDesc.BlendOpAlpha = D3D12BlendOperation(state->blend->alpha.operation);
}
blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(state->writeMask);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
index 33ea80b4564..2b7f3e947a4 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
@@ -82,11 +82,11 @@ namespace dawn_native { namespace d3d12 {
if (enable16BitTypes) {
// enable-16bit-types are only allowed in -HV 2018 (default)
arguments.push_back(L"/enable-16bit-types");
- } else {
- // Enable FXC backward compatibility by setting the language version to 2016
- arguments.push_back(L"-HV");
- arguments.push_back(L"2016");
}
+
+ arguments.push_back(L"-HV");
+ arguments.push_back(L"2018");
+
return arguments;
}
@@ -97,16 +97,14 @@ namespace dawn_native { namespace d3d12 {
const std::string& hlslSource,
const char* entryPoint,
uint32_t compileFlags) {
- IDxcLibrary* dxcLibrary;
- DAWN_TRY_ASSIGN(dxcLibrary, device->GetOrCreateDxcLibrary());
+ ComPtr<IDxcLibrary> dxcLibrary = device->GetDxcLibrary();
ComPtr<IDxcBlobEncoding> sourceBlob;
DAWN_TRY(CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
"DXC create blob"));
- IDxcCompiler* dxcCompiler;
- DAWN_TRY_ASSIGN(dxcCompiler, device->GetOrCreateDxcCompiler());
+ ComPtr<IDxcCompiler> dxcCompiler = device->GetDxcCompiler();
std::wstring entryPointW;
DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(entryPoint));
@@ -245,30 +243,33 @@ namespace dawn_native { namespace d3d12 {
errorStream << "Tint HLSL failure:" << std::endl;
tint::transform::Manager transformManager;
- transformManager.append(std::make_unique<tint::transform::BoundArrayAccessors>());
+ tint::transform::DataMap transformInputs;
+
+ if (GetDevice()->IsRobustnessEnabled()) {
+ transformManager.Add<tint::transform::BoundArrayAccessors>();
+ }
if (stage == SingleShaderStage::Vertex) {
- transformManager.append(std::make_unique<tint::transform::FirstIndexOffset>(
+ transformManager.Add<tint::transform::FirstIndexOffset>();
+ transformInputs.Add<tint::transform::FirstIndexOffset::BindingPoint>(
layout->GetFirstIndexOffsetShaderRegister(),
- layout->GetFirstIndexOffsetRegisterSpace()));
+ layout->GetFirstIndexOffsetRegisterSpace());
}
- transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
- transformManager.append(std::make_unique<tint::transform::Renamer>());
- transformManager.append(std::make_unique<tint::transform::Hlsl>());
+ transformManager.Add<tint::transform::BindingRemapper>();
+ transformManager.Add<tint::transform::Renamer>();
+ transformManager.Add<tint::transform::Hlsl>();
- tint::transform::DataMap transformInputs;
+ // D3D12 registers like `t3` and `c3` have the same bindingOffset number in the
+ // remapping but should not be considered a collision because they have different types.
+ const bool mayCollide = true;
transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
- std::move(accessControls));
- tint::transform::Transform::Output output =
- transformManager.Run(GetTintProgram(), transformInputs);
-
- tint::Program& program = output.program;
- if (!program.IsValid()) {
- errorStream << "Tint program transform error: " << program.Diagnostics().str()
- << std::endl;
- return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
- }
+ std::move(accessControls), mayCollide);
- if (auto* data = output.data.Get<tint::transform::FirstIndexOffset::Data>()) {
+ tint::Program program;
+ tint::transform::DataMap transformOutputs;
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+ &transformOutputs, nullptr));
+
+ if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
firstOffsetInfo->usesVertexIndex = data->has_vertex_index;
if (firstOffsetInfo->usesVertexIndex) {
firstOffsetInfo->vertexIndexOffset = data->first_vertex_offset;
@@ -279,7 +280,7 @@ namespace dawn_native { namespace d3d12 {
}
}
- if (auto* data = output.data.Get<tint::transform::Renamer::Data>()) {
+ if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
auto it = data->remappings.find(entryPointName);
if (it == data->remappings.end()) {
return DAWN_VALIDATION_ERROR("Could not find remapped name for entry point.");
@@ -313,12 +314,15 @@ namespace dawn_native { namespace d3d12 {
options_glsl.force_zero_initialized_variables = true;
spirv_cross::CompilerHLSL::Options options_hlsl;
- if (GetDevice()->IsExtensionEnabled(Extension::ShaderFloat16)) {
+ if (GetDevice()->IsToggleEnabled(Toggle::UseDXC)) {
options_hlsl.shader_model = ToBackend(GetDevice())->GetDeviceInfo().shaderModel;
- options_hlsl.enable_16bit_types = true;
} else {
options_hlsl.shader_model = 51;
}
+
+ if (GetDevice()->IsExtensionEnabled(Extension::ShaderFloat16)) {
+ options_hlsl.enable_16bit_types = true;
+ }
// PointCoord and PointSize are not supported in HLSL
// TODO (hao.x.li@intel.com): The point_coord_compat and point_size_compat are
// required temporarily for https://bugs.chromium.org/p/dawn/issues/detail?id=146,
@@ -480,8 +484,7 @@ namespace dawn_native { namespace d3d12 {
}
ResultOrError<uint64_t> ShaderModule::GetDXCompilerVersion() const {
- ComPtr<IDxcValidator> dxcValidator;
- DAWN_TRY_ASSIGN(dxcValidator, ToBackend(GetDevice())->GetOrCreateDxcValidator());
+ ComPtr<IDxcValidator> dxcValidator = ToBackend(GetDevice())->GetDxcValidator();
ComPtr<IDxcVersionInfo> versionInfo;
DAWN_TRY(CheckHRESULT(dxcValidator.As(&versionInfo),
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
index 8fd4554e7fd..9002cc4c2db 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
@@ -14,19 +14,70 @@
#include "dawn_native/d3d12/SwapChainD3D12.h"
+#include "dawn_native/Surface.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/TextureD3D12.h"
#include <dawn/dawn_wsi.h>
namespace dawn_native { namespace d3d12 {
+ namespace {
+
+ uint32_t PresentModeToBufferCount(wgpu::PresentMode mode) {
+ switch (mode) {
+ case wgpu::PresentMode::Immediate:
+ case wgpu::PresentMode::Fifo:
+ return 2;
+ case wgpu::PresentMode::Mailbox:
+ return 3;
+ }
+ }
+
+ uint32_t PresentModeToSwapInterval(wgpu::PresentMode mode) {
+ switch (mode) {
+ case wgpu::PresentMode::Immediate:
+ case wgpu::PresentMode::Mailbox:
+ return 0;
+ case wgpu::PresentMode::Fifo:
+ return 1;
+ }
+ }
+
+ UINT PresentModeToSwapChainFlags(wgpu::PresentMode mode) {
+ UINT flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
+
+ if (mode == wgpu::PresentMode::Immediate) {
+ flags |= DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING;
+ }
+
+ return flags;
+ }
+
+ DXGI_USAGE ToDXGIUsage(wgpu::TextureUsage usage) {
+ DXGI_USAGE dxgiUsage = DXGI_CPU_ACCESS_NONE;
+ if (usage & wgpu::TextureUsage::Sampled) {
+ dxgiUsage |= DXGI_USAGE_SHADER_INPUT;
+ }
+ if (usage & wgpu::TextureUsage::Storage) {
+ dxgiUsage |= DXGI_USAGE_UNORDERED_ACCESS;
+ }
+ if (usage & wgpu::TextureUsage::RenderAttachment) {
+ dxgiUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ }
+ return dxgiUsage;
+ }
+
+ } // namespace
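
For illustration, a standalone sketch that collects the three helpers above into one table: Fifo and Immediate use 2 buffers while Mailbox uses 3, only Fifo presents with a sync interval of 1, and only Immediate requests tearing. The enum below is a local mirror of wgpu::PresentMode, used only so the sketch compiles on its own.

    #include <cassert>
    #include <cstdint>

    // Local mirror of wgpu::PresentMode, for this sketch only.
    enum class PresentMode { Immediate, Mailbox, Fifo };

    struct DxgiPresentParams {
        uint32_t bufferCount;   // BufferCount in DXGI_SWAP_CHAIN_DESC1
        uint32_t swapInterval;  // first argument of IDXGISwapChain::Present
        bool allowTearing;      // adds DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING
    };

    // Mirrors PresentModeToBufferCount/SwapInterval/SwapChainFlags above.
    DxgiPresentParams ToDxgiPresentParams(PresentMode mode) {
        switch (mode) {
            case PresentMode::Immediate:
                return {2, 0, true};
            case PresentMode::Mailbox:
                return {3, 0, false};
            case PresentMode::Fifo:
                return {2, 1, false};
        }
        return {2, 1, false};  // unreachable
    }

    int main() {
        assert(ToDxgiPresentParams(PresentMode::Mailbox).bufferCount == 3);
        assert(ToDxgiPresentParams(PresentMode::Fifo).swapInterval == 1);
        assert(ToDxgiPresentParams(PresentMode::Immediate).allowTearing);
    }
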
+
+ // OldSwapChain
// static
- Ref<SwapChain> SwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
- return AcquireRef(new SwapChain(device, descriptor));
+ Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new OldSwapChain(device, descriptor));
}
- SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
: OldSwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
DawnWSIContextD3D12 wsiContext = {};
@@ -37,10 +88,9 @@ namespace dawn_native { namespace d3d12 {
mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
}
- SwapChain::~SwapChain() {
- }
+ OldSwapChain::~OldSwapChain() = default;
- TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
DeviceBase* device = GetDevice();
const auto& im = GetImplementation();
DawnSwapChainNextTexture next = {};
@@ -61,7 +111,7 @@ namespace dawn_native { namespace d3d12 {
return dawnTexture.Detach();
}
- MaybeError SwapChain::OnBeforePresent(TextureViewBase* view) {
+ MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
Device* device = ToBackend(GetDevice());
CommandRecordingContext* commandContext;
@@ -77,4 +127,234 @@ namespace dawn_native { namespace d3d12 {
return {};
}
+ // SwapChain
+
+ // static
+ ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+ DAWN_TRY(swapchain->Initialize(previousSwapChain));
+ return swapchain;
+ }
+
+ SwapChain::~SwapChain() {
+ DetachFromSurface();
+ }
+
+ // Initializes the swapchain on the surface. Note that `previousSwapChain` may or may not be
+ // nullptr. If it is not nullptr it means that it is the swapchain previously in use on the
+ // surface and that we have a chance to reuse its underlying IDXGISwapChain and "buffers".
+ MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+ ASSERT(GetSurface()->GetType() == Surface::Type::WindowsHWND);
+
+ // Precompute the configuration parameters we want for the DXGI swapchain.
+ mConfig.bufferCount = PresentModeToBufferCount(GetPresentMode());
+ mConfig.format = D3D12TextureFormat(GetFormat());
+ mConfig.swapChainFlags = PresentModeToSwapChainFlags(GetPresentMode());
+ mConfig.usage = ToDXGIUsage(GetUsage());
+
+ // There is no previous swapchain so we can create one directly; there is nothing else
+ // to do.
+ if (previousSwapChain == nullptr) {
+ return InitializeSwapChainFromScratch();
+ }
+
+ // TODO(cwallez@chromium.org): figure out what should happen when surfaces are used by
+ // multiple backends one after the other. It probably needs to block until the backend
+ // and GPU are completely finished with the previous swapchain.
+ if (previousSwapChain->GetBackendType() != wgpu::BackendType::D3D12) {
+ return DAWN_VALIDATION_ERROR("d3d12::SwapChain cannot switch between APIs");
+ }
+
+ // TODO(cwallez@chromium.org): use ToBackend once OldSwapChainBase is removed.
+ SwapChain* previousD3D12SwapChain = static_cast<SwapChain*>(previousSwapChain);
+
+ // TODO(cwallez@chromium.org): Figure out switching an HWND between devices; it might
+ // require just losing the reference to the swapchain, but might also need to wait for
+ // all previous operations to complete.
+ if (GetDevice() != previousSwapChain->GetDevice()) {
+ return DAWN_VALIDATION_ERROR("d3d12::SwapChain cannot switch between devices");
+ }
+
+ // The previous swapchain is on the same device so we want to reuse it, but that is not
+ // always possible because DXGI requires that a new swapchain be created if the
+ // DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING flag changes.
+ bool canReuseSwapChain =
+ ((mConfig.swapChainFlags ^ previousD3D12SwapChain->mConfig.swapChainFlags) &
+ DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) == 0;
+
+ // We can't reuse the previous swapchain, so we destroy it and wait for all of its
+ // references to be forgotten (otherwise DXGI complains that there are outstanding references).
+ if (!canReuseSwapChain) {
+ DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
+ return InitializeSwapChainFromScratch();
+ }
+
+ // After all this we know we can reuse the swapchain; see if it is also possible to reuse
+ // the buffers.
+ mDXGISwapChain = std::move(previousD3D12SwapChain->mDXGISwapChain);
+
+ bool canReuseBuffers = GetWidth() == previousSwapChain->GetWidth() &&
+ GetHeight() == previousSwapChain->GetHeight() &&
+ GetFormat() == previousSwapChain->GetFormat() &&
+ GetPresentMode() == previousSwapChain->GetPresentMode();
+ if (canReuseBuffers) {
+ mBuffers = std::move(previousD3D12SwapChain->mBuffers);
+ mBufferLastUsedSerials = std::move(previousD3D12SwapChain->mBufferLastUsedSerials);
+ mCurrentBuffer = previousD3D12SwapChain->mCurrentBuffer;
+ return {};
+ }
+
+ // We can't reuse the buffers so we need to resize them. IDXGISwapChain::ResizeBuffers
+ // requires that all references to the buffers are released before it is called. Contrary
+ // to D3D11, the application is responsible for keeping references to the buffers until
+ // the GPU is done using them, so we have no choice but to synchronously wait for all
+ // operations to complete on the previous swapchain and then lose references to its buffers.
+ DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
+ DAWN_TRY(
+ CheckHRESULT(mDXGISwapChain->ResizeBuffers(mConfig.bufferCount, GetWidth(), GetHeight(),
+ mConfig.format, mConfig.swapChainFlags),
+ "IDXGISwapChain::ResizeBuffers"));
+ return CollectSwapChainBuffers();
+ }
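
For illustration, a standalone sketch of the two reuse decisions Initialize() makes above: the IDXGISwapChain itself can be kept only if the ALLOW_TEARING flag did not change, and its buffers can be kept only if size, format and present mode are all identical. The struct and the flag value are assumptions made so the sketch compiles on its own.

    #include <cassert>
    #include <cstdint>

    // Hypothetical, trimmed-down view of the state the reuse decision inspects.
    struct SwapChainConfig {
        uint32_t width;
        uint32_t height;
        int format;       // stands in for DXGI_FORMAT
        int presentMode;  // stands in for wgpu::PresentMode
        uint32_t swapChainFlags;
    };

    // Assumed stand-in value for DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING.
    constexpr uint32_t kAllowTearingFlag = 0x800;

    // DXGI requires a brand new swapchain if the ALLOW_TEARING flag changes.
    bool CanReuseSwapChain(const SwapChainConfig& a, const SwapChainConfig& b) {
        return ((a.swapChainFlags ^ b.swapChainFlags) & kAllowTearingFlag) == 0;
    }

    // Buffers can only be kept if nothing that affects their allocation changed.
    bool CanReuseBuffers(const SwapChainConfig& a, const SwapChainConfig& b) {
        return a.width == b.width && a.height == b.height && a.format == b.format &&
               a.presentMode == b.presentMode;
    }

    int main() {
        SwapChainConfig prev{1280, 720, 1, 0, 0};
        SwapChainConfig next = prev;
        assert(CanReuseSwapChain(prev, next) && CanReuseBuffers(prev, next));

        next.width = 1920;  // resized window: keep the swapchain, resize the buffers
        assert(CanReuseSwapChain(prev, next) && !CanReuseBuffers(prev, next));

        next.swapChainFlags |= kAllowTearingFlag;  // tearing toggled: recreate
        assert(!CanReuseSwapChain(prev, next));
    }
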
+
+ MaybeError SwapChain::InitializeSwapChainFromScratch() {
+ ASSERT(mDXGISwapChain == nullptr);
+
+ Device* device = ToBackend(GetDevice());
+
+ DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+ swapChainDesc.Width = GetWidth();
+ swapChainDesc.Height = GetHeight();
+ swapChainDesc.Format = mConfig.format;
+ swapChainDesc.Stereo = false;
+ swapChainDesc.SampleDesc.Count = 1;
+ swapChainDesc.SampleDesc.Quality = 0;
+ swapChainDesc.BufferUsage = mConfig.usage;
+ swapChainDesc.BufferCount = mConfig.bufferCount;
+ swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
+ swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+ swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
+ swapChainDesc.Flags = mConfig.swapChainFlags;
+
+ ComPtr<IDXGIFactory2> factory2 = nullptr;
+ DAWN_TRY(CheckHRESULT(device->GetFactory()->QueryInterface(IID_PPV_ARGS(&factory2)),
+ "Getting IDXGIFactory2"));
+
+ ComPtr<IDXGISwapChain1> swapChain1;
+ switch (GetSurface()->GetType()) {
+ case Surface::Type::WindowsHWND: {
+ DAWN_TRY(CheckHRESULT(
+ factory2->CreateSwapChainForHwnd(device->GetCommandQueue().Get(),
+ static_cast<HWND>(GetSurface()->GetHWND()),
+ &swapChainDesc, nullptr, nullptr, &swapChain1),
+ "Creating the IDXGISwapChain1"));
+ break;
+ }
+ case Surface::Type::WindowsCoreWindow: {
+ DAWN_TRY(CheckHRESULT(
+ factory2->CreateSwapChainForCoreWindow(device->GetCommandQueue().Get(),
+ GetSurface()->GetCoreWindow(),
+ &swapChainDesc, nullptr, &swapChain1),
+ "Creating the IDXGISwapChain1"));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ DAWN_TRY(CheckHRESULT(swapChain1.As(&mDXGISwapChain), "Getting IDXGISwapChain1"));
+
+ return CollectSwapChainBuffers();
+ }
+
+ MaybeError SwapChain::CollectSwapChainBuffers() {
+ ASSERT(mDXGISwapChain != nullptr);
+ ASSERT(mBuffers.empty());
+
+ mBuffers.resize(mConfig.bufferCount);
+ for (uint32_t i = 0; i < mConfig.bufferCount; i++) {
+ DAWN_TRY(CheckHRESULT(mDXGISwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])),
+ "Getting IDXGISwapChain buffer"));
+ }
+
+ // Pretend all the buffers were last used at the beginning of time.
+ mBufferLastUsedSerials.resize(mConfig.bufferCount, ExecutionSerial(0));
+ return {};
+ }
+
+ MaybeError SwapChain::PresentImpl() {
+ Device* device = ToBackend(GetDevice());
+
+ // Transition the texture to the present state as required by IDXGISwapChain1::Present()
+ // TODO(cwallez@chromium.org): Remove the need for this by eagerly transitioning the
+ // presentable texture to present at the end of submits that use them.
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+ mApiTexture->TrackUsageAndTransitionNow(commandContext, kPresentTextureUsage,
+ mApiTexture->GetAllSubresources());
+ DAWN_TRY(device->ExecutePendingCommandContext());
+
+ // Do the actual present. DXGI_STATUS_OCCLUDED is a valid return value that's just a
+ // message to the application that it could stop rendering.
+ HRESULT presentResult =
+ mDXGISwapChain->Present(PresentModeToSwapInterval(GetPresentMode()), 0);
+ if (presentResult != DXGI_STATUS_OCCLUDED) {
+ DAWN_TRY(CheckHRESULT(presentResult, "IDXGISwapChain::Present"));
+ }
+
+ // Record that "now" is the last time the buffer has been used.
+ DAWN_TRY(device->NextSerial());
+ mBufferLastUsedSerials[mCurrentBuffer] = device->GetPendingCommandSerial();
+
+ mApiTexture->APIDestroy();
+ mApiTexture = nullptr;
+
+ return {};
+ }
+
+ ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
+ Device* device = ToBackend(GetDevice());
+
+ // Synchronously wait until previous operations on the next swapchain buffer are finished.
+ // This is the logic that performs frame pacing.
+ // TODO(cwallez@chromium.org): Consider whether this should be lifted for Mailbox so that
+ // there is no frame pacing.
+ mCurrentBuffer = mDXGISwapChain->GetCurrentBackBufferIndex();
+ DAWN_TRY(device->WaitForSerial(mBufferLastUsedSerials[mCurrentBuffer]));
+
+ // Create the API side objects for this use of the swapchain's buffer.
+ TextureDescriptor descriptor = GetSwapChainBaseTextureDescriptor(this);
+ DAWN_TRY_ASSIGN(mApiTexture, Texture::Create(ToBackend(GetDevice()), &descriptor,
+ mBuffers[mCurrentBuffer]));
+
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ return mApiTexture->APICreateView();
+ }
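
For illustration, a standalone sketch of the frame pacing performed above: PresentImpl() records the pending serial against the buffer that was just presented, and GetCurrentTextureViewImpl() waits on the serial stored for the next back buffer before handing it out again. The class and callback below are hypothetical; Device::WaitForSerial and IDXGISwapChain::GetCurrentBackBufferIndex() play these roles in the real code.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    // Minimal frame-pacing sketch: one "last used" serial per back buffer.
    class FramePacer {
      public:
        explicit FramePacer(std::size_t bufferCount) : mLastUsedSerials(bufferCount, 0) {}

        // Called before handing a buffer out; blocks until its previous use is done.
        void AcquireBuffer(uint32_t index, const std::function<void(uint64_t)>& waitForSerial) {
            waitForSerial(mLastUsedSerials[index]);
        }

        // Called at present time with the serial of the commands that used the buffer.
        void OnPresent(uint32_t index, uint64_t pendingSerial) {
            mLastUsedSerials[index] = pendingSerial;
        }

      private:
        std::vector<uint64_t> mLastUsedSerials;
    };

    int main() {
        FramePacer pacer(2);
        uint64_t lastWaitedSerial = 0;
        auto wait = [&](uint64_t serial) { lastWaitedSerial = serial; };

        pacer.AcquireBuffer(0, wait);  // never used yet: waits on serial 0 (a no-op)
        pacer.OnPresent(0, 5);
        pacer.AcquireBuffer(0, wait);  // reuse: must wait for serial 5 first
        assert(lastWaitedSerial == 5);
    }
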
+
+ MaybeError SwapChain::DetachAndWaitForDeallocation() {
+ DetachFromSurface();
+
+ // DetachFromSurface calls Texture::Destroy(), which enqueues the D3D12 resource in a
+ // SerialQueue with the current "pending serial" so that we don't destroy the texture
+ // before it is finished being used. Flush the commands and wait for that serial to be
+ // passed, then Tick the device to make sure the reference to the D3D12 texture is removed.
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(device->NextSerial());
+ DAWN_TRY(device->WaitForSerial(device->GetLastSubmittedCommandSerial()));
+ return device->TickImpl();
+ }
+
+ void SwapChain::DetachFromSurfaceImpl() {
+ if (mApiTexture != nullptr) {
+ mApiTexture->APIDestroy();
+ mApiTexture = nullptr;
+ }
+
+ mDXGISwapChain = nullptr;
+ mBuffers.clear();
+ }
+
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
index 4083b04a144..bc476d1193b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
@@ -17,23 +17,71 @@
#include "dawn_native/SwapChain.h"
+#include "dawn_native/IntegerTypes.h"
+#include "dawn_native/d3d12/d3d12_platform.h"
+
namespace dawn_native { namespace d3d12 {
class Device;
+ class Texture;
- class SwapChain final : public OldSwapChainBase {
+ class OldSwapChain final : public OldSwapChainBase {
public:
- static Ref<SwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+ static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
protected:
- SwapChain(Device* device, const SwapChainDescriptor* descriptor);
- ~SwapChain() override;
+ OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+ ~OldSwapChain() override;
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
MaybeError OnBeforePresent(TextureViewBase* view) override;
wgpu::TextureUsage mTextureUsage;
};
+ class SwapChain final : public NewSwapChainBase {
+ public:
+ static ResultOrError<Ref<SwapChain>> Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
+
+ private:
+ ~SwapChain() override;
+
+ using NewSwapChainBase::NewSwapChainBase;
+ MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+ struct Config {
+ // Information that's passed to the D3D12 swapchain creation call.
+ UINT bufferCount;
+ UINT swapChainFlags;
+ DXGI_FORMAT format;
+ DXGI_USAGE usage;
+ };
+
+ // NewSwapChainBase implementation
+ MaybeError PresentImpl() override;
+ ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
+ void DetachFromSurfaceImpl() override;
+
+ // Performs the swapchain initialization steps, assuming there is nothing we can reuse.
+ MaybeError InitializeSwapChainFromScratch();
+ // Does the swapchain initialization step of gathering the buffers.
+ MaybeError CollectSwapChainBuffers();
+ // Calls DetachFromSurface but also synchronously waits until all references to the
+ // swapchain and buffers are removed, as that's a constraint for some DXGI operations.
+ MaybeError DetachAndWaitForDeallocation();
+
+ Config mConfig;
+
+ ComPtr<IDXGISwapChain3> mDXGISwapChain;
+ std::vector<ComPtr<ID3D12Resource>> mBuffers;
+ std::vector<ExecutionSerial> mBufferLastUsedSerials;
+ uint32_t mCurrentBuffer = 0;
+
+ Ref<Texture> mApiTexture;
+ };
+
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_SWAPCHAIN_D3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
index e5307cd7deb..50c3f1e7e1d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
@@ -33,13 +33,13 @@ namespace dawn_native { namespace d3d12 {
}
} // namespace
- Texture2DCopySplit ComputeTextureCopySplit(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- Texture2DCopySplit copy;
+ TextureCopySubresource ComputeTextureCopySubresource(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ TextureCopySubresource copy;
ASSERT(bytesPerRow % blockInfo.byteSize == 0);
@@ -215,7 +215,7 @@ namespace dawn_native { namespace d3d12 {
const uint64_t bytesPerSlice = bytesPerRow * rowsPerImage;
- // The function ComputeTextureCopySplit() decides how to split the copy based on:
+ // The function ComputeTextureCopySubresource() decides how to split the copy based on:
// - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
// - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256)
// Each slice of a 2D array or 3D copy might need to be split, but because of the WebGPU
@@ -233,23 +233,23 @@ namespace dawn_native { namespace d3d12 {
copyFirstLayerOrigin.z = 0;
}
- copies.copies2D[0] = ComputeTextureCopySplit(copyFirstLayerOrigin, copyOneLayerSize,
- blockInfo, offset, bytesPerRow, rowsPerImage);
+ copies.copySubresources[0] = ComputeTextureCopySubresource(
+ copyFirstLayerOrigin, copyOneLayerSize, blockInfo, offset, bytesPerRow, rowsPerImage);
- // When the copy only refers one texture 2D array layer or a 3D texture, copies.copies2D[1]
- // will never be used so we can safely early return here.
+ // When the copy only refers to one texture 2D array layer or a 3D texture,
+ // copies.copySubresources[1] will never be used so we can safely early return here.
if (copySize.depthOrArrayLayers == 1 || is3DTexture) {
return copies;
}
if (bytesPerSlice % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0) {
- copies.copies2D[1] = copies.copies2D[0];
- copies.copies2D[1].offset += bytesPerSlice;
+ copies.copySubresources[1] = copies.copySubresources[0];
+ copies.copySubresources[1].offset += bytesPerSlice;
} else {
const uint64_t bufferOffsetNextLayer = offset + bytesPerSlice;
- copies.copies2D[1] =
- ComputeTextureCopySplit(copyFirstLayerOrigin, copyOneLayerSize, blockInfo,
- bufferOffsetNextLayer, bytesPerRow, rowsPerImage);
+ copies.copySubresources[1] =
+ ComputeTextureCopySubresource(copyFirstLayerOrigin, copyOneLayerSize, blockInfo,
+ bufferOffsetNextLayer, bytesPerRow, rowsPerImage);
}
return copies;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
index f4bdb7b6d6c..89c31d071a1 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
@@ -27,7 +27,7 @@ namespace dawn_native {
namespace dawn_native { namespace d3d12 {
- struct Texture2DCopySplit {
+ struct TextureCopySubresource {
static constexpr unsigned int kMaxTextureCopyRegions = 2;
struct CopyInfo {
@@ -44,17 +44,17 @@ namespace dawn_native { namespace d3d12 {
};
struct TextureCopySplits {
- static constexpr uint32_t kMaxTextureCopySplits = 2;
+ static constexpr uint32_t kMaxTextureCopySubresources = 2;
- std::array<Texture2DCopySplit, kMaxTextureCopySplits> copies2D;
+ std::array<TextureCopySubresource, kMaxTextureCopySubresources> copySubresources;
};
- Texture2DCopySplit ComputeTextureCopySplit(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage);
+ TextureCopySubresource ComputeTextureCopySubresource(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
TextureCopySplits ComputeTextureCopySplits(Origin3D origin,
Extent3D copySize,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
index fdcd43e4843..7e72f4d59c8 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
@@ -422,12 +422,14 @@ namespace dawn_native { namespace d3d12 {
const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture,
ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
bool isSwapChainTexture,
bool isInitialized) {
Ref<Texture> dawnTexture =
AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
DAWN_TRY(dawnTexture->InitializeAsExternalTexture(descriptor, std::move(d3d12Texture),
- acquireMutexKey, isSwapChainTexture));
+ acquireMutexKey, releaseMutexKey,
+ isSwapChainTexture));
// Importing a multi-planar format must be initialized. This is required because
// a shared multi-planar format cannot be initialized by Dawn.
@@ -454,6 +456,7 @@ namespace dawn_native { namespace d3d12 {
MaybeError Texture::InitializeAsExternalTexture(const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture,
ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
bool isSwapChainTexture) {
Device* dawnDevice = ToBackend(GetDevice());
@@ -464,6 +467,7 @@ namespace dawn_native { namespace d3d12 {
"D3D12 acquiring shared mutex"));
mAcquireMutexKey = acquireMutexKey;
+ mReleaseMutexKey = releaseMutexKey;
mDxgiKeyedMutex = std::move(dxgiKeyedMutex);
mSwapChainTexture = isSwapChainTexture;
@@ -529,10 +533,7 @@ namespace dawn_native { namespace d3d12 {
// When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
// texture is owned externally. The texture's owning entity must remain responsible for
// memory management.
- mResourceAllocation = { info, 0, std::move(d3d12Texture), nullptr };
-
- SetIsSubresourceContentInitialized(true, GetAllSubresources());
-
+ mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
return {};
}
@@ -573,7 +574,7 @@ namespace dawn_native { namespace d3d12 {
mSwapChainTexture = false;
if (mDxgiKeyedMutex != nullptr) {
- mDxgiKeyedMutex->ReleaseSync(uint64_t(mAcquireMutexKey) + 1);
+ mDxgiKeyedMutex->ReleaseSync(uint64_t(mReleaseMutexKey));
device->ReleaseKeyedMutexForTexture(std::move(mDxgiKeyedMutex));
}
}
@@ -779,7 +780,7 @@ namespace dawn_native { namespace d3d12 {
void Texture::TrackUsageAndGetResourceBarrierForPass(
CommandRecordingContext* commandContext,
std::vector<D3D12_RESOURCE_BARRIER>* barriers,
- const PassTextureUsage& textureUsages) {
+ const TextureSubresourceUsage& textureUsages) {
if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
// Track the underlying heap to ensure residency.
Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
@@ -793,44 +794,56 @@ namespace dawn_native { namespace d3d12 {
// These transitions assume it is a 2D texture
ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
- mSubresourceStateAndDecay.Merge(
- textureUsages, [&](const SubresourceRange& mergeRange, StateAndDecay* state,
- wgpu::TextureUsage usage) {
- // Skip if this subresource is not used during the current pass
- if (usage == wgpu::TextureUsage::None) {
- return;
- }
+ mSubresourceStateAndDecay.Merge(textureUsages, [&](const SubresourceRange& mergeRange,
+ StateAndDecay* state,
+ wgpu::TextureUsage usage) {
+ // Skip if this subresource is not used during the current pass
+ if (usage == wgpu::TextureUsage::None) {
+ return;
+ }
- D3D12_RESOURCE_STATES newState = D3D12TextureUsage(usage, GetFormat());
- TransitionSubresourceRange(barriers, mergeRange, state, newState,
- pendingCommandSerial);
- });
+ D3D12_RESOURCE_STATES newState = D3D12TextureUsage(usage, GetFormat());
+ TransitionSubresourceRange(barriers, mergeRange, state, newState, pendingCommandSerial);
+ });
}
D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(uint32_t mipLevel,
uint32_t baseArrayLayer,
uint32_t layerCount) const {
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
rtvDesc.Format = GetD3D12Format();
if (IsMultisampledTexture()) {
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
ASSERT(GetNumMipLevels() == 1);
ASSERT(layerCount == 1);
ASSERT(baseArrayLayer == 0);
ASSERT(mipLevel == 0);
rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
- } else {
- // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base array
- // layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat them as
- // 1-layer 2D array textures. (Just like how we treat SRVs)
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
- // _rtv
- rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
- rtvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
- rtvDesc.Texture2DArray.ArraySize = layerCount;
- rtvDesc.Texture2DArray.MipSlice = mipLevel;
- rtvDesc.Texture2DArray.PlaneSlice = 0;
+ return rtvDesc;
+ }
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base
+ // array layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat
+ // them as 1-layer 2D array textures. (Just like how we treat SRVs)
+ // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
+ // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
+ // _rtv
+ rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
+ rtvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
+ rtvDesc.Texture2DArray.ArraySize = layerCount;
+ rtvDesc.Texture2DArray.MipSlice = mipLevel;
+ rtvDesc.Texture2DArray.PlaneSlice = 0;
+ break;
+ case wgpu::TextureDimension::e3D:
+ rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
+ rtvDesc.Texture3D.MipSlice = mipLevel;
+ rtvDesc.Texture3D.FirstWSlice = baseArrayLayer;
+ rtvDesc.Texture3D.WSize = layerCount;
+ break;
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
+ break;
}
return rtvDesc;
}
@@ -861,7 +874,6 @@ namespace dawn_native { namespace d3d12 {
MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
const SubresourceRange& range,
TextureBase::ClearValue clearValue) {
-
ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
Device* device = ToBackend(GetDevice());
@@ -954,9 +966,13 @@ namespace dawn_native { namespace d3d12 {
for (Aspect aspect : IterateEnumMask(range.aspects)) {
const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
- uint32_t bytesPerRow = Align((GetWidth() / blockInfo.width) * blockInfo.byteSize,
- kTextureBytesPerRowAlignment);
- uint64_t bufferSize = bytesPerRow * (GetHeight() / blockInfo.height);
+ Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+
+ uint32_t bytesPerRow =
+ Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
+ kTextureBytesPerRowAlignment);
+ uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
+ largestMipSize.depthOrArrayLayers;
DynamicUploader* uploader = device->GetDynamicUploader();
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle,
@@ -969,8 +985,8 @@ namespace dawn_native { namespace d3d12 {
// compute d3d12 texture copy locations for texture and buffer
Extent3D copySize = GetMipLevelPhysicalSize(level);
- uint32_t rowsPerImage = GetHeight() / blockInfo.height;
- Texture2DCopySplit copySplit = ComputeTextureCopySplit(
+ uint32_t rowsPerImage = copySize.height / blockInfo.height;
+ TextureCopySubresource copySplit = ComputeTextureCopySubresource(
{0, 0, 0}, copySize, blockInfo, uploadHandle.startOffset, bytesPerRow,
rowsPerImage);
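
For illustration, a short worked example of the staging-buffer sizing above, assuming the 256-byte row-pitch alignment D3D12 requires for buffer-to-texture copies (D3D12_TEXTURE_DATA_PITCH_ALIGNMENT); the texture dimensions are made up.

    #include <cassert>
    #include <cstdint>

    // Round `value` up to the next multiple of a power-of-two `alignment`.
    constexpr uint64_t Align(uint64_t value, uint64_t alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        // Assumed alignment: D3D12_TEXTURE_DATA_PITCH_ALIGNMENT is 256 bytes.
        constexpr uint64_t kRowPitchAlignment = 256;

        // Example: clearing a 100x50x4 RGBA8 mip level (4 bytes per texel, 1x1 block).
        const uint64_t unalignedBytesPerRow = 100 * 4;  // 400 bytes
        const uint64_t bytesPerRow = Align(unalignedBytesPerRow, kRowPitchAlignment);
        assert(bytesPerRow == 512);

        const uint64_t rowsPerImage = 50;  // height / block height
        const uint64_t depth = 4;          // depthOrArrayLayers of the mip level
        const uint64_t bufferSize = bytesPerRow * rowsPerImage * depth;
        assert(bufferSize == 512 * 50 * 4);
    }
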
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
index 528dd216984..d760c1e2ea0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
@@ -43,6 +43,7 @@ namespace dawn_native { namespace d3d12 {
const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture,
ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
bool isSwapChainTexture,
bool isInitialized);
static ResultOrError<Ref<Texture>> Create(Device* device,
@@ -64,7 +65,7 @@ namespace dawn_native { namespace d3d12 {
void TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
std::vector<D3D12_RESOURCE_BARRIER>* barrier,
- const PassTextureUsage& textureUsages);
+ const TextureSubresourceUsage& textureUsages);
void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
std::vector<D3D12_RESOURCE_BARRIER>* barrier,
wgpu::TextureUsage usage,
@@ -89,6 +90,7 @@ namespace dawn_native { namespace d3d12 {
MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture,
ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
bool isSwapChainTexture);
MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);
@@ -123,6 +125,7 @@ namespace dawn_native { namespace d3d12 {
bool mSwapChainTexture = false;
ExternalMutexSerial mAcquireMutexKey = ExternalMutexSerial(0);
+ ExternalMutexSerial mReleaseMutexKey = ExternalMutexSerial(0);
ComPtr<IDXGIKeyedMutex> mDxgiKeyedMutex;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
index b1902100785..98e9785be97 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
@@ -68,12 +68,12 @@ namespace dawn_native { namespace d3d12 {
D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
uint32_t level,
- uint32_t slice,
+ uint32_t layer,
Aspect aspect) {
D3D12_TEXTURE_COPY_LOCATION copyLocation;
copyLocation.pResource = texture->GetD3D12Resource();
copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
- copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, slice, aspect);
+ copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, layer, aspect);
return copyLocation;
}
@@ -143,25 +143,25 @@ namespace dawn_native { namespace d3d12 {
}
void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
- const Texture2DCopySplit& baseCopySplit,
+ const TextureCopySubresource& baseCopySplit,
ID3D12Resource* bufferResource,
uint64_t baseOffset,
uint64_t bufferBytesPerRow,
Texture* texture,
uint32_t textureMiplevel,
- uint32_t textureSlice,
+ uint32_t textureLayer,
Aspect aspect) {
ASSERT(HasOneBit(aspect));
const D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureSlice, aspect);
+ ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);
const uint64_t offsetBytes = baseCopySplit.offset + baseOffset;
for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
- const Texture2DCopySplit::CopyInfo& info = baseCopySplit.copies[i];
+ const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];
// TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
- // members in Texture2DCopySplit::CopyInfo.
+ // members in TextureCopySubresource::CopyInfo.
const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
offsetBytes, bufferBytesPerRow, aspect);
@@ -191,29 +191,31 @@ namespace dawn_native { namespace d3d12 {
const uint64_t bytesPerSlice = bytesPerRow * rowsPerImage;
- // copySplits.copies2D[1] is always calculated for the second copy slice with
+ // copySplits.copySubresources[1] is always calculated for the second copy slice with
// extra "bytesPerSlice" copy offset compared with the first copy slice. So
// here we use an array bufferOffsetsForNextSlice to record the extra offsets
// for each copy slice: bufferOffsetsForNextSlice[0] is the extra offset for
- // the next copy slice that uses copySplits.copies2D[0], and
+ // the next copy slice that uses copySplits.copySubresources[0], and
// bufferOffsetsForNextSlice[1] is the extra offset for the next copy slice
- // that uses copySplits.copies2D[1].
- std::array<uint64_t, TextureCopySplits::kMaxTextureCopySplits> bufferOffsetsForNextSlice = {
- {0u, 0u}};
+ // that uses copySplits.copySubresources[1].
+ std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources>
+ bufferOffsetsForNextSlice = {{0u, 0u}};
- for (uint32_t copySlice = 0; copySlice < copySize.depthOrArrayLayers; ++copySlice) {
- const uint32_t splitIndex = copySlice % copySplits.copies2D.size();
+ for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
+ const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
- const Texture2DCopySplit& copySplitPerLayerBase = copySplits.copies2D[splitIndex];
+ const TextureCopySubresource& copySplitPerLayerBase =
+ copySplits.copySubresources[splitIndex];
const uint64_t bufferOffsetForNextSlice = bufferOffsetsForNextSlice[splitIndex];
- const uint32_t copyTextureLayer = copySlice + textureCopy.origin.z;
+ const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
RecordCopyBufferToTextureFromTextureCopySplit(
commandContext->GetCommandList(), copySplitPerLayerBase, bufferResource,
bufferOffsetForNextSlice, bytesPerRow, texture, textureCopy.mipLevel,
copyTextureLayer, aspect);
- bufferOffsetsForNextSlice[splitIndex] += bytesPerSlice * copySplits.copies2D.size();
+ bufferOffsetsForNextSlice[splitIndex] +=
+ bytesPerSlice * copySplits.copySubresources.size();
}
}
@@ -233,8 +235,8 @@ namespace dawn_native { namespace d3d12 {
textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage, true);
RecordCopyBufferToTextureFromTextureCopySplit(
- commandContext->GetCommandList(), copySplits.copies2D[0], bufferResource, 0,
- bytesPerRow, texture, textureCopy.mipLevel, textureCopy.origin.z, aspect);
+ commandContext->GetCommandList(), copySplits.copySubresources[0], bufferResource, 0,
+ bytesPerRow, texture, textureCopy.mipLevel, 0, aspect);
}
void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
@@ -261,24 +263,24 @@ namespace dawn_native { namespace d3d12 {
}
void RecordCopyTextureToBufferFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
- const Texture2DCopySplit& baseCopySplit,
+ const TextureCopySubresource& baseCopySplit,
Buffer* buffer,
uint64_t baseOffset,
uint64_t bufferBytesPerRow,
Texture* texture,
uint32_t textureMiplevel,
- uint32_t textureSlice,
+ uint32_t textureLayer,
Aspect aspect) {
const D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureSlice, aspect);
+ ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);
const uint64_t offset = baseCopySplit.offset + baseOffset;
for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
- const Texture2DCopySplit::CopyInfo& info = baseCopySplit.copies[i];
+ const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];
// TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
- // members in Texture2DCopySplit::CopyInfo.
+ // members in TextureCopySubresource::CopyInfo.
const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
ComputeBufferLocationForCopyTextureRegion(texture, buffer->GetD3D12Resource(),
info.bufferSize, offset,
@@ -309,28 +311,30 @@ namespace dawn_native { namespace d3d12 {
const uint64_t bytesPerSlice = bufferCopy.bytesPerRow * bufferCopy.rowsPerImage;
- // copySplits.copies2D[1] is always calculated for the second copy slice with
+ // copySplits.copySubresources[1] is always calculated for the second copy slice with
// extra "bytesPerSlice" copy offset compared with the first copy slice. So
// here we use an array bufferOffsetsForNextSlice to record the extra offsets
// for each copy slice: bufferOffsetsForNextSlice[0] is the extra offset for
- // the next copy slice that uses copySplits.copies2D[0], and
+ // the next copy slice that uses copySplits.copySubresources[0], and
// bufferOffsetsForNextSlice[1] is the extra offset for the next copy slice
- // that uses copySplits.copies2D[1].
- std::array<uint64_t, TextureCopySplits::kMaxTextureCopySplits> bufferOffsetsForNextSlice = {
- {0u, 0u}};
- for (uint32_t copySlice = 0; copySlice < copySize.depthOrArrayLayers; ++copySlice) {
- const uint32_t splitIndex = copySlice % copySplits.copies2D.size();
-
- const Texture2DCopySplit& copySplitPerLayerBase = copySplits.copies2D[splitIndex];
+ // that uses copySplits.copySubresources[1].
+ std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources>
+ bufferOffsetsForNextSlice = {{0u, 0u}};
+ for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
+ const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
+
+ const TextureCopySubresource& copySplitPerLayerBase =
+ copySplits.copySubresources[splitIndex];
const uint64_t bufferOffsetForNextSlice = bufferOffsetsForNextSlice[splitIndex];
- const uint32_t copyTextureLayer = copySlice + textureCopy.origin.z;
+ const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
RecordCopyTextureToBufferFromTextureCopySplit(
commandList, copySplitPerLayerBase, buffer, bufferOffsetForNextSlice,
bufferCopy.bytesPerRow, texture, textureCopy.mipLevel, copyTextureLayer,
textureCopy.aspect);
- bufferOffsetsForNextSlice[splitIndex] += bytesPerSlice * copySplits.copies2D.size();
+ bufferOffsetsForNextSlice[splitIndex] +=
+ bytesPerSlice * copySplits.copySubresources.size();
}
}
@@ -349,9 +353,9 @@ namespace dawn_native { namespace d3d12 {
ComputeTextureCopySplits(textureCopy.origin, copySize, blockInfo, bufferCopy.offset,
bufferCopy.bytesPerRow, bufferCopy.rowsPerImage, true);
- RecordCopyTextureToBufferFromTextureCopySplit(
- commandList, copySplits.copies2D[0], buffer, 0, bufferCopy.bytesPerRow, texture,
- textureCopy.mipLevel, textureCopy.origin.z, textureCopy.aspect);
+ RecordCopyTextureToBufferFromTextureCopySplit(commandList, copySplits.copySubresources[0],
+ buffer, 0, bufferCopy.bytesPerRow, texture,
+ textureCopy.mipLevel, 0, textureCopy.aspect);
}
void RecordCopyTextureToBuffer(ID3D12GraphicsCommandList* commandList,
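
The comments above RecordCopyBufferToTexture and RecordCopyTextureToBuffer describe how at most two precomputed copy subresources alternate across layers, each reuse shifted by bytesPerSlice times the number of subresources. Here is a standalone sketch of that offset bookkeeping, using simplified stand-in types (the real TextureCopySplits and TextureCopySubresource carry per-copy regions as well).

#include <array>
#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-ins for Dawn's TextureCopySplits / TextureCopySubresource.
struct FakeSubresource { uint64_t offset; };
struct FakeSplits {
    static constexpr size_t kMaxTextureCopySubresources = 2;
    std::vector<FakeSubresource> copySubresources;  // size 1 or 2
};

// Returns the buffer offset used for each copied layer. Layer i reuses
// copySubresources[i % N]; every reuse of the same subresource advances by
// bytesPerSlice * N, matching the bufferOffsetsForNextSlice logic above.
std::vector<uint64_t> ComputePerLayerOffsets(const FakeSplits& splits,
                                             uint32_t layerCount,
                                             uint64_t bytesPerSlice) {
    std::array<uint64_t, FakeSplits::kMaxTextureCopySubresources> extraOffsets = {{0u, 0u}};
    std::vector<uint64_t> result;
    for (uint32_t layer = 0; layer < layerCount; ++layer) {
        const size_t splitIndex = layer % splits.copySubresources.size();
        result.push_back(splits.copySubresources[splitIndex].offset + extraOffsets[splitIndex]);
        extraOffsets[splitIndex] += bytesPerSlice * splits.copySubresources.size();
    }
    return result;
}
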
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
index 719a19a8544..fea27d57a7d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
@@ -30,7 +30,7 @@ namespace dawn_native { namespace d3d12 {
D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
uint32_t level,
- uint32_t slice,
+ uint32_t layer,
Aspect aspect);
D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
@@ -45,13 +45,13 @@ namespace dawn_native { namespace d3d12 {
bool IsTypeless(DXGI_FORMAT format);
void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
- const Texture2DCopySplit& baseCopySplit,
+ const TextureCopySubresource& baseCopySplit,
ID3D12Resource* bufferResource,
uint64_t baseOffset,
uint64_t bufferBytesPerRow,
Texture* texture,
uint32_t textureMiplevel,
- uint32_t textureSlice,
+ uint32_t textureLayer,
Aspect aspect);
void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
@@ -65,13 +65,13 @@ namespace dawn_native { namespace d3d12 {
Aspect aspect);
void RecordCopyTextureToBufferFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
- const Texture2DCopySplit& baseCopySplit,
+ const TextureCopySubresource& baseCopySplit,
Buffer* buffer,
uint64_t baseOffset,
uint64_t bufferBytesPerRow,
Texture* texture,
uint32_t textureMiplevel,
- uint32_t textureSlice,
+ uint32_t textureLayer,
Aspect aspect);
void RecordCopyTextureToBuffer(ID3D12GraphicsCommandList* commandList,
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
index 4283a13a2cf..328ac8400d3 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
@@ -28,6 +28,19 @@ namespace dawn_native { namespace metal {
class CommandRecordingContext;
class Device;
+ class Texture;
+
+ void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+ id<MTLBuffer> mtlBuffer,
+ uint64_t bufferSize,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Texture* texture,
+ uint32_t mipLevel,
+ const Origin3D& origin,
+ Aspect aspect,
+ const Extent3D& copySize);
class CommandBuffer final : public CommandBufferBase {
public:
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
index 71f0d0f1280..04cb72d3922 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
@@ -544,6 +544,73 @@ namespace dawn_native { namespace metal {
} // anonymous namespace
+ void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+ id<MTLBuffer> mtlBuffer,
+ uint64_t bufferSize,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Texture* texture,
+ uint32_t mipLevel,
+ const Origin3D& origin,
+ Aspect aspect,
+ const Extent3D& copySize) {
+ TextureBufferCopySplit splitCopies =
+ ComputeTextureBufferCopySplit(texture, mipLevel, origin, copySize, bufferSize, offset,
+ bytesPerRow, rowsPerImage, aspect);
+
+ MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), aspect);
+
+ for (const auto& copyInfo : splitCopies) {
+ uint64_t bufferOffset = copyInfo.bufferOffset;
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e2D: {
+ const MTLOrigin textureOrigin =
+ MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+ const MTLSize copyExtent =
+ MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+
+ for (uint32_t z = copyInfo.textureOrigin.z;
+ z < copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers;
+ ++z) {
+ [commandContext->EnsureBlit() copyFromBuffer:mtlBuffer
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:copyExtent
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:z
+ destinationLevel:mipLevel
+ destinationOrigin:textureOrigin
+ options:blitOption];
+ bufferOffset += copyInfo.bytesPerImage;
+ }
+ break;
+ }
+ case wgpu::TextureDimension::e3D: {
+ [commandContext->EnsureBlit()
+ copyFromBuffer:mtlBuffer
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+ copyInfo.copyExtent.height,
+ copyInfo.copyExtent.depthOrArrayLayers)
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:0
+ destinationLevel:mipLevel
+ destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+ copyInfo.textureOrigin.y,
+ copyInfo.textureOrigin.z)
+ options:blitOption];
+ break;
+ }
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
+ }
+ }
+ }
+
// static
Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
@@ -551,25 +618,25 @@ namespace dawn_native { namespace metal {
}
MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
- const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
- size_t nextPassNumber = 0;
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
- auto LazyClearForPass = [](const PassResourceUsage& usages,
- CommandRecordingContext* commandContext) {
- for (size_t i = 0; i < usages.textures.size(); ++i) {
- Texture* texture = ToBackend(usages.textures[i]);
+ auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
+ CommandRecordingContext* commandContext) {
+ for (size_t i = 0; i < scope.textures.size(); ++i) {
+ Texture* texture = ToBackend(scope.textures[i]);
// Clear subresources that are not render attachments. Render attachments will be
// cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
// subresource has not been initialized before the render pass.
- usages.textureUsages[i].Iterate(
+ scope.textureUsages[i].Iterate(
[&](const SubresourceRange& range, wgpu::TextureUsage usage) {
if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(range);
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
}
});
}
- for (BufferBase* bufferBase : usages.buffers) {
+ for (BufferBase* bufferBase : scope.buffers) {
ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
}
};
@@ -580,19 +647,23 @@ namespace dawn_native { namespace metal {
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
- LazyClearForPass(passResourceUsages[nextPassNumber], commandContext);
+ for (const SyncScopeResourceUsage& scope :
+ GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+ LazyClearSyncScope(scope, commandContext);
+ }
commandContext->EndBlit();
DAWN_TRY(EncodeComputePass(commandContext));
- nextPassNumber++;
+ nextComputePassNumber++;
break;
}
case Command::BeginRenderPass: {
BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- LazyClearForPass(passResourceUsages[nextPassNumber], commandContext);
+ LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
+ commandContext);
commandContext->EndBlit();
LazyClearRenderPassAttachments(cmd);
@@ -600,7 +671,7 @@ namespace dawn_native { namespace metal {
DAWN_TRY(EncodeRenderPass(commandContext, descriptor.Get(), cmd->width,
cmd->height));
- nextPassNumber++;
+ nextRenderPassNumber++;
break;
}
@@ -630,42 +701,12 @@ namespace dawn_native { namespace metal {
Texture* texture = ToBackend(dst.texture.Get());
buffer->EnsureDataInitialized(commandContext);
- EnsureDestinationTextureInitialized(texture, copy->destination, copy->copySize);
-
- TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
- texture, dst.mipLevel, dst.origin, copySize, buffer->GetSize(), src.offset,
- src.bytesPerRow, src.rowsPerImage, dst.aspect);
-
- for (uint32_t i = 0; i < splitCopies.count; ++i) {
- const TextureBufferCopySplit::CopyInfo& copyInfo = splitCopies.copies[i];
-
- const uint32_t copyBaseLayer = copyInfo.textureOrigin.z;
- const uint32_t copyLayerCount = copyInfo.copyExtent.depthOrArrayLayers;
- const MTLOrigin textureOrigin =
- MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
- const MTLSize copyExtent =
- MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
-
- MTLBlitOption blitOption =
- ComputeMTLBlitOption(texture->GetFormat(), dst.aspect);
-
- uint64_t bufferOffset = copyInfo.bufferOffset;
- for (uint32_t copyLayer = copyBaseLayer;
- copyLayer < copyBaseLayer + copyLayerCount; ++copyLayer) {
- [commandContext->EnsureBlit() copyFromBuffer:buffer->GetMTLBuffer()
- sourceOffset:bufferOffset
- sourceBytesPerRow:copyInfo.bytesPerRow
- sourceBytesPerImage:copyInfo.bytesPerImage
- sourceSize:copyExtent
- toTexture:texture->GetMTLTexture()
- destinationSlice:copyLayer
- destinationLevel:dst.mipLevel
- destinationOrigin:textureOrigin
- options:blitOption];
- bufferOffset += copyInfo.bytesPerImage;
- }
- }
+ EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
+ RecordCopyBufferToTexture(commandContext, buffer->GetMTLBuffer(),
+ buffer->GetSize(), src.offset, src.bytesPerRow,
+ src.rowsPerImage, texture, dst.mipLevel, dst.origin,
+ dst.aspect, copySize);
break;
}
@@ -680,42 +721,66 @@ namespace dawn_native { namespace metal {
buffer->EnsureDataInitializedAsDestination(commandContext, copy);
texture->EnsureSubresourceContentInitialized(
- GetSubresourcesAffectedByCopy(src, copySize));
+ commandContext, GetSubresourcesAffectedByCopy(src, copySize));
TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
texture, src.mipLevel, src.origin, copySize, buffer->GetSize(), dst.offset,
dst.bytesPerRow, dst.rowsPerImage, src.aspect);
- for (uint32_t i = 0; i < splitCopies.count; ++i) {
- const TextureBufferCopySplit::CopyInfo& copyInfo = splitCopies.copies[i];
-
- const uint32_t copyBaseLayer = copyInfo.textureOrigin.z;
- const uint32_t copyLayerCount = copyInfo.copyExtent.depthOrArrayLayers;
- const MTLOrigin textureOrigin =
- MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
- const MTLSize copyExtent =
- MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
-
+ for (const auto& copyInfo : splitCopies) {
MTLBlitOption blitOption =
ComputeMTLBlitOption(texture->GetFormat(), src.aspect);
-
uint64_t bufferOffset = copyInfo.bufferOffset;
- for (uint32_t copyLayer = copyBaseLayer;
- copyLayer < copyBaseLayer + copyLayerCount; ++copyLayer) {
- [commandContext->EnsureBlit() copyFromTexture:texture->GetMTLTexture()
- sourceSlice:copyLayer
- sourceLevel:src.mipLevel
- sourceOrigin:textureOrigin
- sourceSize:copyExtent
- toBuffer:buffer->GetMTLBuffer()
- destinationOffset:bufferOffset
- destinationBytesPerRow:copyInfo.bytesPerRow
- destinationBytesPerImage:copyInfo.bytesPerImage
- options:blitOption];
- bufferOffset += copyInfo.bytesPerImage;
+
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e2D: {
+ const MTLOrigin textureOrigin = MTLOriginMake(
+ copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+ const MTLSize copyExtent = MTLSizeMake(
+ copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+
+ for (uint32_t z = copyInfo.textureOrigin.z;
+ z < copyInfo.textureOrigin.z +
+ copyInfo.copyExtent.depthOrArrayLayers;
+ ++z) {
+ [commandContext->EnsureBlit()
+ copyFromTexture:texture->GetMTLTexture()
+ sourceSlice:z
+ sourceLevel:src.mipLevel
+ sourceOrigin:textureOrigin
+ sourceSize:copyExtent
+ toBuffer:buffer->GetMTLBuffer()
+ destinationOffset:bufferOffset
+ destinationBytesPerRow:copyInfo.bytesPerRow
+ destinationBytesPerImage:copyInfo.bytesPerImage
+ options:blitOption];
+ bufferOffset += copyInfo.bytesPerImage;
+ }
+ break;
+ }
+ case wgpu::TextureDimension::e3D: {
+ [commandContext->EnsureBlit()
+ copyFromTexture:texture->GetMTLTexture()
+ sourceSlice:0
+ sourceLevel:src.mipLevel
+ sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+ copyInfo.textureOrigin.y,
+ copyInfo.textureOrigin.z)
+ sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+ copyInfo.copyExtent.height,
+ copyInfo.copyExtent
+ .depthOrArrayLayers)
+ toBuffer:buffer->GetMTLBuffer()
+ destinationOffset:bufferOffset
+ destinationBytesPerRow:copyInfo.bytesPerRow
+ destinationBytesPerImage:copyInfo.bytesPerImage
+ options:blitOption];
+ break;
+ }
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
}
}
-
break;
}
@@ -726,31 +791,56 @@ namespace dawn_native { namespace metal {
Texture* dstTexture = ToBackend(copy->destination.texture.Get());
srcTexture->EnsureSubresourceContentInitialized(
+ commandContext,
GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
- EnsureDestinationTextureInitialized(dstTexture, copy->destination,
- copy->copySize);
+ EnsureDestinationTextureInitialized(commandContext, dstTexture,
+ copy->destination, copy->copySize);
+
+ // TODO(jiawei.shao@intel.com): support copies with 1D textures.
+ ASSERT(srcTexture->GetDimension() != wgpu::TextureDimension::e1D &&
+ dstTexture->GetDimension() != wgpu::TextureDimension::e1D);
- // TODO(jiawei.shao@intel.com): support copies with 1D and 3D textures.
- ASSERT(srcTexture->GetDimension() == wgpu::TextureDimension::e2D &&
- dstTexture->GetDimension() == wgpu::TextureDimension::e2D);
- const MTLSize sizeOneLayer =
+ const MTLSize sizeOneSlice =
MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
- const MTLOrigin sourceOriginNoLayer =
- MTLOriginMake(copy->source.origin.x, copy->source.origin.y, 0);
- const MTLOrigin destinationOriginNoLayer =
- MTLOriginMake(copy->destination.origin.x, copy->destination.origin.y, 0);
- for (uint32_t slice = 0; slice < copy->copySize.depthOrArrayLayers; ++slice) {
+ uint32_t sourceLayer = 0;
+ uint32_t sourceOriginZ = 0;
+
+ uint32_t destinationLayer = 0;
+ uint32_t destinationOriginZ = 0;
+
+ uint32_t* sourceZPtr;
+ if (srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+ sourceZPtr = &sourceLayer;
+ } else {
+ sourceZPtr = &sourceOriginZ;
+ }
+
+ uint32_t* destinationZPtr;
+ if (dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+ destinationZPtr = &destinationLayer;
+ } else {
+ destinationZPtr = &destinationOriginZ;
+ }
+
+ // TODO(crbug.com/dawn/782): Do a single T2T copy if both are 3D.
+ for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
+ *sourceZPtr = copy->source.origin.z + z;
+ *destinationZPtr = copy->destination.origin.z + z;
+
[commandContext->EnsureBlit()
copyFromTexture:srcTexture->GetMTLTexture()
- sourceSlice:copy->source.origin.z + slice
+ sourceSlice:sourceLayer
sourceLevel:copy->source.mipLevel
- sourceOrigin:sourceOriginNoLayer
- sourceSize:sizeOneLayer
+ sourceOrigin:MTLOriginMake(copy->source.origin.x,
+ copy->source.origin.y, sourceOriginZ)
+ sourceSize:sizeOneSlice
toTexture:dstTexture->GetMTLTexture()
- destinationSlice:copy->destination.origin.z + slice
+ destinationSlice:destinationLayer
destinationLevel:copy->destination.mipLevel
- destinationOrigin:destinationOriginNoLayer];
+ destinationOrigin:MTLOriginMake(copy->destination.origin.x,
+ copy->destination.origin.y,
+ destinationOriginZ)];
}
break;
}
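
In the texture-to-texture path above, the per-iteration z value is routed either into the blit's slice argument or into the origin's z component, depending on whether each texture is a 2D array or a 3D texture. Below is a small plain-C++ sketch of that pointer-redirection idiom; SelectZ and its types are hypothetical.

#include <cstdint>

enum class Dim { e2D, e3D };

struct SliceAndOrigin {
    uint32_t slice = 0;    // destinationSlice / sourceSlice for 2D array textures
    uint32_t originZ = 0;  // origin.z for 3D textures
};

// For each z in the copy, write z into exactly one of the two fields,
// mirroring the sourceZPtr/destinationZPtr selection above.
SliceAndOrigin SelectZ(Dim dimension, uint32_t baseZ, uint32_t z) {
    SliceAndOrigin out;
    uint32_t* target = (dimension == Dim::e2D) ? &out.slice : &out.originZ;
    *target = baseZ + z;
    return out;
}
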
@@ -856,6 +946,11 @@ namespace dawn_native { namespace metal {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+ // Skip noop dispatches, as they can cause issues on some systems.
+ if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
+ break;
+ }
+
bindGroups.Apply(encoder);
storageBufferLengths.Apply(encoder, lastPipeline);
@@ -1281,8 +1376,8 @@ namespace dawn_native { namespace metal {
break;
}
- case Command::SetBlendColor: {
- SetBlendColorCmd* cmd = mCommands.NextCommand<SetBlendColorCmd>();
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
[encoder setBlendColorRed:cmd->color.r
green:cmd->color.g
blue:cmd->color.b
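
The new RecordCopyBufferToTexture issues one blit per array layer for 2D textures, advancing the buffer offset by bytesPerImage each time, and a single blit spanning all depth slices for 3D textures. A backend-agnostic sketch of that control flow follows, with a hypothetical issueBlit callback standing in for the Metal copyFromBuffer call.

#include <cstdint>
#include <functional>

struct CopyRegion {
    uint64_t bufferOffset;
    uint32_t originZ;
    uint32_t depthOrArrayLayers;
    uint64_t bytesPerImage;
};

// `issueBlit(bufferOffset, destinationSlice, originZ, depth)` stands in for the
// backend copy call; it is not part of Dawn's API.
void CopyBufferRegionToTexture(
    const CopyRegion& region,
    bool is3D,
    const std::function<void(uint64_t, uint32_t, uint32_t, uint32_t)>& issueBlit) {
    if (is3D) {
        // 3D textures: one copy spanning every depth slice.
        issueBlit(region.bufferOffset, /*slice*/ 0, region.originZ, region.depthOrArrayLayers);
        return;
    }
    // 2D array textures: one copy per layer, stepping through the buffer image by image.
    uint64_t offset = region.bufferOffset;
    for (uint32_t z = region.originZ; z < region.originZ + region.depthOrArrayLayers; ++z) {
        issueBlit(offset, /*slice*/ z, /*originZ*/ 0, /*depth*/ 1);
        offset += region.bytesPerImage;
    }
}
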
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
index 5a8b3a1cf93..dc95fc8c4e1 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
@@ -298,46 +298,13 @@ namespace dawn_native { namespace metal {
TextureCopy* dst,
const Extent3D& copySizePixels) {
Texture* texture = ToBackend(dst->texture.Get());
+ EnsureDestinationTextureInitialized(GetPendingCommandContext(), texture, *dst,
+ copySizePixels);
- // This function assumes data is perfectly aligned. Otherwise, it might be necessary
- // to split copying to several stages: see ComputeTextureBufferCopySplit.
- const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(dst->aspect).block;
- ASSERT(dataLayout.rowsPerImage == copySizePixels.height / blockInfo.height);
- ASSERT(dataLayout.bytesPerRow ==
- copySizePixels.width / blockInfo.width * blockInfo.byteSize);
-
- EnsureDestinationTextureInitialized(texture, *dst, copySizePixels);
-
- // Metal validation layer requires that if the texture's pixel format is a compressed
- // format, the sourceSize must be a multiple of the pixel format's block size or be
- // clamped to the edge of the texture if the block extends outside the bounds of a
- // texture.
- const Extent3D clampedSize =
- texture->ClampToMipLevelVirtualSize(dst->mipLevel, dst->origin, copySizePixels);
- const uint32_t copyBaseLayer = dst->origin.z;
- const uint32_t copyLayerCount = copySizePixels.depthOrArrayLayers;
- const uint64_t bytesPerImage = dataLayout.rowsPerImage * dataLayout.bytesPerRow;
-
- MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), dst->aspect);
-
- uint64_t bufferOffset = dataLayout.offset;
- for (uint32_t copyLayer = copyBaseLayer; copyLayer < copyBaseLayer + copyLayerCount;
- ++copyLayer) {
- [GetPendingCommandContext()->EnsureBlit()
- copyFromBuffer:ToBackend(source)->GetBufferHandle()
- sourceOffset:bufferOffset
- sourceBytesPerRow:dataLayout.bytesPerRow
- sourceBytesPerImage:bytesPerImage
- sourceSize:MTLSizeMake(clampedSize.width, clampedSize.height, 1)
- toTexture:texture->GetMTLTexture()
- destinationSlice:copyLayer
- destinationLevel:dst->mipLevel
- destinationOrigin:MTLOriginMake(dst->origin.x, dst->origin.y, 0)
- options:blitOption];
-
- bufferOffset += bytesPerImage;
- }
-
+ RecordCopyBufferToTexture(GetPendingCommandContext(), ToBackend(source)->GetBufferHandle(),
+ source->GetSize(), dataLayout.offset, dataLayout.bytesPerRow,
+ dataLayout.rowsPerImage, texture, dst->mipLevel, dst->origin,
+ dst->aspect, copySizePixels);
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
index 8da86184872..a79228c8d62 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
@@ -135,17 +135,17 @@ namespace dawn_native { namespace metal {
return MTLBlendFactorZero;
case wgpu::BlendFactor::One:
return MTLBlendFactorOne;
- case wgpu::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::Src:
return MTLBlendFactorSourceColor;
- case wgpu::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::OneMinusSrc:
return MTLBlendFactorOneMinusSourceColor;
case wgpu::BlendFactor::SrcAlpha:
return MTLBlendFactorSourceAlpha;
case wgpu::BlendFactor::OneMinusSrcAlpha:
return MTLBlendFactorOneMinusSourceAlpha;
- case wgpu::BlendFactor::DstColor:
+ case wgpu::BlendFactor::Dst:
return MTLBlendFactorDestinationColor;
- case wgpu::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::OneMinusDst:
return MTLBlendFactorOneMinusDestinationColor;
case wgpu::BlendFactor::DstAlpha:
return MTLBlendFactorDestinationAlpha;
@@ -153,11 +153,20 @@ namespace dawn_native { namespace metal {
return MTLBlendFactorOneMinusDestinationAlpha;
case wgpu::BlendFactor::SrcAlphaSaturated:
return MTLBlendFactorSourceAlphaSaturated;
- case wgpu::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::Constant:
return alpha ? MTLBlendFactorBlendAlpha : MTLBlendFactorBlendColor;
- case wgpu::BlendFactor::OneMinusBlendColor:
+ case wgpu::BlendFactor::OneMinusConstant:
return alpha ? MTLBlendFactorOneMinusBlendAlpha
: MTLBlendFactorOneMinusBlendColor;
+
+ // Deprecated blend factors should be normalized prior to this call.
+ case wgpu::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::DstColor:
+ case wgpu::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::OneMinusBlendColor:
+ UNREACHABLE();
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
index d302ce7fc80..22c15ee72b5 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
@@ -70,10 +70,12 @@ namespace dawn_native { namespace metal {
errorStream << "Tint MSL failure:" << std::endl;
tint::transform::Manager transformManager;
+ tint::transform::DataMap transformInputs;
+
if (stage == SingleShaderStage::Vertex &&
GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
- transformManager.append(
- MakeVertexPullingTransform(*vertexState, entryPointName, kPullingBufferBindingSet));
+ AddVertexPullingTransformConfig(*vertexState, entryPointName, kPullingBufferBindingSet,
+ &transformInputs);
for (VertexBufferSlot slot :
IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
@@ -83,20 +85,18 @@ namespace dawn_native { namespace metal {
// this MSL buffer index.
}
}
- transformManager.append(std::make_unique<tint::transform::BoundArrayAccessors>());
- transformManager.append(std::make_unique<tint::transform::Renamer>());
- transformManager.append(std::make_unique<tint::transform::Msl>());
-
- tint::transform::Transform::Output output = transformManager.Run(GetTintProgram());
-
- tint::Program& program = output.program;
- if (!program.IsValid()) {
- errorStream << "Tint program transform error: " << program.Diagnostics().str()
- << std::endl;
- return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ if (GetDevice()->IsRobustnessEnabled()) {
+ transformManager.Add<tint::transform::BoundArrayAccessors>();
}
+ transformManager.Add<tint::transform::Renamer>();
+ transformManager.Add<tint::transform::Msl>();
+
+ tint::Program program;
+ tint::transform::DataMap transformOutputs;
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+ &transformOutputs, nullptr));
- if (auto* data = output.data.Get<tint::transform::Renamer::Data>()) {
+ if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
auto it = data->remappings.find(entryPointName);
if (it == data->remappings.end()) {
return DAWN_VALIDATION_ERROR("Could not find remapped name for entry point.");
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
index 1265a4ecc37..9c0f4b396fb 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
@@ -25,6 +25,7 @@
namespace dawn_native { namespace metal {
+ class CommandRecordingContext;
class Device;
MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format);
@@ -48,7 +49,8 @@ namespace dawn_native { namespace metal {
id<MTLTexture> GetMTLTexture();
- void EnsureSubresourceContentInitialized(const SubresourceRange& range);
+ void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ const SubresourceRange& range);
private:
Texture(Device* device, const TextureDescriptor* descriptor);
@@ -56,7 +58,9 @@ namespace dawn_native { namespace metal {
void DestroyImpl() override;
- MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
+ MaybeError ClearTexture(CommandRecordingContext* commandContext,
+ const SubresourceRange& range,
+ TextureBase::ClearValue clearValue);
NSPRef<id<MTLTexture>> mMtlTexture;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
index 29485ba9bef..355cf91f3e2 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
@@ -359,8 +359,9 @@ namespace dawn_native { namespace metal {
AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()]);
if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- device->ConsumedError(
- ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
+ device->ConsumedError(ClearTexture(device->GetPendingCommandContext(),
+ GetAllSubresources(),
+ TextureBase::ClearValue::NonZero));
}
}
@@ -401,12 +402,11 @@ namespace dawn_native { namespace metal {
return mMtlTexture.Get();
}
- MaybeError Texture::ClearTexture(const SubresourceRange& range,
+ MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
+ const SubresourceRange& range,
TextureBase::ClearValue clearValue) {
Device* device = ToBackend(GetDevice());
- CommandRecordingContext* commandContext = device->GetPendingCommandContext();
-
const uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
const double dClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.0 : 1.0;
@@ -445,6 +445,7 @@ namespace dawn_native { namespace metal {
continue;
}
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
switch (aspect) {
case Aspect::Depth:
descriptor.depthAttachment.texture = GetMTLTexture();
@@ -482,6 +483,8 @@ namespace dawn_native { namespace metal {
NSRef<MTLRenderPassDescriptor> descriptor;
uint32_t attachment = 0;
+ uint32_t numZSlices = GetMipLevelVirtualSize(level).depthOrArrayLayers;
+
for (uint32_t arrayLayer = range.baseArrayLayer;
arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
if (clearValue == TextureBase::ClearValue::Zero &&
@@ -491,28 +494,33 @@ namespace dawn_native { namespace metal {
continue;
}
- if (descriptor == nullptr) {
- // Note that this creates a descriptor that's autoreleased so we don't
- // use AcquireNSRef
- descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
- }
+ for (uint32_t z = 0; z < numZSlices; ++z) {
+ if (descriptor == nullptr) {
+ // Note that this creates a descriptor that's autoreleased so we
+ // don't use AcquireNSRef
+ descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
+ }
- [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
- [*descriptor colorAttachments][attachment].loadAction = MTLLoadActionClear;
- [*descriptor colorAttachments][attachment].storeAction =
- MTLStoreActionStore;
- [*descriptor colorAttachments][attachment].clearColor =
- MTLClearColorMake(dClearColor, dClearColor, dClearColor, dClearColor);
- [*descriptor colorAttachments][attachment].level = level;
- [*descriptor colorAttachments][attachment].slice = arrayLayer;
-
- attachment++;
-
- if (attachment == kMaxColorAttachments) {
- attachment = 0;
- commandContext->BeginRender(descriptor.Get());
- commandContext->EndRender();
- descriptor = nullptr;
+ [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
+ [*descriptor colorAttachments][attachment].loadAction =
+ MTLLoadActionClear;
+ [*descriptor colorAttachments][attachment].storeAction =
+ MTLStoreActionStore;
+ [*descriptor colorAttachments][attachment].clearColor =
+ MTLClearColorMake(dClearColor, dClearColor, dClearColor,
+ dClearColor);
+ [*descriptor colorAttachments][attachment].level = level;
+ [*descriptor colorAttachments][attachment].slice = arrayLayer;
+ [*descriptor colorAttachments][attachment].depthPlane = z;
+
+ attachment++;
+
+ if (attachment == kMaxColorAttachments) {
+ attachment = 0;
+ commandContext->BeginRender(descriptor.Get());
+ commandContext->EndRender();
+ descriptor = nullptr;
+ }
}
}
@@ -538,9 +546,7 @@ namespace dawn_native { namespace metal {
(largestMipSize.height / blockInfo.height),
512llu);
- // TODO(enga): Multiply by largestMipSize.depthOrArrayLayers and do a larger 3D copy to
- // clear a whole range of subresources when tracking that is improved.
- uint64_t bufferSize = largestMipBytesPerImage * 1;
+ uint64_t bufferSize = largestMipBytesPerImage * largestMipSize.depthOrArrayLayers;
if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
@@ -577,7 +583,7 @@ namespace dawn_native { namespace metal {
sourceBytesPerRow:largestMipBytesPerRow
sourceBytesPerImage:largestMipBytesPerImage
sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
- 1)
+ virtualSize.depthOrArrayLayers)
toTexture:GetMTLTexture()
destinationSlice:arrayLayer
destinationLevel:level
@@ -595,14 +601,16 @@ namespace dawn_native { namespace metal {
return {};
}
- void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
+ void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ const SubresourceRange& range) {
if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
return;
}
if (!IsSubresourceContentInitialized(range)) {
// If subresource has not been initialized, clear it to black as it could
// contain dirty bits from recycled memory
- GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
+ GetDevice()->ConsumedError(
+ ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
}
}
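
ClearTexture clears renderable subresources by batching them as color attachments of dummy render passes, flushing whenever kMaxColorAttachments is reached, and now walks the depth planes of 3D textures as well. Here is a sketch of the batching pattern only, assuming a hypothetical flush callback that encodes one clearing render pass.

#include <cstdint>
#include <vector>

constexpr uint32_t kMaxColorAttachments = 8;

struct ClearTarget { uint32_t layer; uint32_t z; };

// `flush(batch)` stands in for encoding one render pass that clears every
// attachment in `batch`; it is not a real Dawn/Metal call.
template <typename FlushFn>
void ClearInBatches(const std::vector<ClearTarget>& targets, FlushFn&& flush) {
    std::vector<ClearTarget> batch;
    for (const ClearTarget& target : targets) {
        batch.push_back(target);
        if (batch.size() == kMaxColorAttachments) {
            flush(batch);   // Encode a render pass with a full set of attachments.
            batch.clear();
        }
    }
    if (!batch.empty()) {
        flush(batch);       // Final, partially filled pass.
    }
}
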
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
index a7a5dee0db2..6855734f8e0 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
@@ -38,6 +38,14 @@ namespace dawn_native { namespace metal {
uint32_t count = 0;
std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
+
+ auto begin() const {
+ return copies.begin();
+ }
+
+ auto end() const {
+ return copies.begin() + count;
+ }
};
TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
@@ -50,7 +58,8 @@ namespace dawn_native { namespace metal {
uint32_t rowsPerImage,
Aspect aspect);
- void EnsureDestinationTextureInitialized(Texture* texture,
+ void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+ Texture* texture,
const TextureCopy& dst,
const Extent3D& size);
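
The begin()/end() members return copies.begin() and copies.begin() + count, so range-based for loops visit only the populated entries of the fixed-size array, as the UtilsMetal.mm and CommandBufferMTL.mm changes do with `for (const auto& copyInfo : splitCopies)`. A minimal standalone equivalent of the idiom (the struct and its sizes are illustrative only):

#include <array>
#include <cstddef>
#include <cstdio>

struct SmallList {
    static constexpr std::size_t kMaxRegions = 3;
    std::size_t count = 0;
    std::array<int, kMaxRegions> items{};

    // Expose only the first `count` elements to range-based for loops.
    auto begin() const { return items.begin(); }
    auto end() const { return items.begin() + count; }
};

int main() {
    SmallList list;
    list.items = {10, 20, 30};
    list.count = 2;  // Only two entries are valid.
    for (int value : list) {
        std::printf("%d\n", value);  // Prints 10 and 20, skipping the unused slot.
    }
    return 0;
}
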
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
index bc41ec52244..51fa99325c0 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
@@ -82,7 +82,7 @@ namespace dawn_native { namespace metal {
const Extent3D clampedCopyExtent =
texture->ClampToMipLevelVirtualSize(mipLevel, origin, copyExtent);
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
// Check whether buffer size is big enough.
bool needWorkaround =
@@ -154,7 +154,8 @@ namespace dawn_native { namespace metal {
return copy;
}
- void EnsureDestinationTextureInitialized(Texture* texture,
+ void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+ Texture* texture,
const TextureCopy& dst,
const Extent3D& size) {
ASSERT(texture == dst.texture.Get());
@@ -162,7 +163,7 @@ namespace dawn_native { namespace metal {
if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
texture->SetIsSubresourceContentInitialized(true, range);
} else {
- texture->EnsureSubresourceContentInitialized(range);
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
index 6573d12f296..1fbbbba0d6f 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
@@ -492,7 +492,8 @@ namespace dawn_native { namespace opengl {
case Aspect::Plane1:
UNREACHABLE();
}
- if (srcTexture->GetArrayLayers() == 1) {
+ if (srcTexture->GetArrayLayers() == 1 &&
+ srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment,
srcTexture->GetGLTarget(), srcTexture->GetHandle(),
src.mipLevel);
@@ -502,7 +503,8 @@ namespace dawn_native { namespace opengl {
static_cast<GLint>(src.mipLevel),
static_cast<GLint>(src.origin.z + layer));
}
- if (dstTexture->GetArrayLayers() == 1) {
+ if (dstTexture->GetArrayLayers() == 1 &&
+ dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment,
dstTexture->GetGLTarget(), dstTexture->GetHandle(),
dst.mipLevel);
@@ -536,14 +538,14 @@ namespace dawn_native { namespace opengl {
MaybeError CommandBuffer::Execute() {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- auto TransitionForPass = [](const PassResourceUsage& usages) {
- for (size_t i = 0; i < usages.textures.size(); i++) {
- Texture* texture = ToBackend(usages.textures[i]);
+ auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
+ for (size_t i = 0; i < scope.textures.size(); i++) {
+ Texture* texture = ToBackend(scope.textures[i]);
// Clear subresources that are not render attachments. Render attachments will be
// cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
// subresource has not been initialized before the render pass.
- usages.textureUsages[i].Iterate(
+ scope.textureUsages[i].Iterate(
[&](const SubresourceRange& range, wgpu::TextureUsage usage) {
if (usage & ~wgpu::TextureUsage::RenderAttachment) {
texture->EnsureSubresourceContentInitialized(range);
@@ -551,34 +553,36 @@ namespace dawn_native { namespace opengl {
});
}
- for (BufferBase* bufferBase : usages.buffers) {
+ for (BufferBase* bufferBase : scope.buffers) {
ToBackend(bufferBase)->EnsureDataInitialized();
}
};
- const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
- uint32_t nextPassNumber = 0;
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
Command type;
while (mCommands.NextCommandId(&type)) {
switch (type) {
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
- TransitionForPass(passResourceUsages[nextPassNumber]);
+ for (const SyncScopeResourceUsage& scope :
+ GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+ LazyClearSyncScope(scope);
+ }
DAWN_TRY(ExecuteComputePass());
- nextPassNumber++;
+ nextComputePassNumber++;
break;
}
case Command::BeginRenderPass: {
auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- TransitionForPass(passResourceUsages[nextPassNumber]);
-
+ LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
LazyClearRenderPassAttachments(cmd);
DAWN_TRY(ExecuteRenderPass(cmd));
- nextPassNumber++;
+ nextRenderPassNumber++;
break;
}
@@ -604,11 +608,7 @@ namespace dawn_native { namespace opengl {
CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
auto& src = copy->source;
auto& dst = copy->destination;
- auto& copySize = copy->copySize;
Buffer* buffer = ToBackend(src.buffer.Get());
- Texture* texture = ToBackend(dst.texture.Get());
- GLenum target = texture->GetGLTarget();
- const GLFormat& format = texture->GetGLFormat();
if (dst.aspect == Aspect::Stencil) {
return DAWN_VALIDATION_ERROR(
@@ -617,148 +617,23 @@ namespace dawn_native { namespace opengl {
ASSERT(dst.aspect == Aspect::Color);
buffer->EnsureDataInitialized();
-
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- SubresourceRange subresources =
- GetSubresourcesAffectedByCopy(dst, copy->copySize);
- if (IsCompleteSubresourceCopiedTo(texture, copySize, dst.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, subresources);
+ SubresourceRange range = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+ if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+ dst.mipLevel)) {
+ dst.texture->SetIsSubresourceContentInitialized(true, range);
} else {
- texture->EnsureSubresourceContentInitialized(subresources);
+ ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
}
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
- gl.ActiveTexture(GL_TEXTURE0);
- gl.BindTexture(target, texture->GetHandle());
-
- const Format& formatInfo = texture->GetFormat();
- const TexelBlockInfo& blockInfo = formatInfo.GetAspectInfo(dst.aspect).block;
-
- if (formatInfo.isCompressed) {
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
-
- Extent3D copyExtent = ComputeTextureCopyExtent(dst, copySize);
-
- // In GLES glPixelStorei() doesn't affect CompressedTexSubImage*D() and
- // GL_UNPACK_COMPRESSED_BLOCK_* isn't defined, so we have to workaround
- // this limitation by copying the compressed texture data once per row.
- // See OpenGL ES 3.2 SPEC Chapter 8.4.1, "Pixel Storage Modes and Pixel
- // Buffer Objects" for more details.
- if (gl.GetVersion().IsES()) {
- uint64_t copyDataSizePerBlockRow =
- (copySize.width / blockInfo.width) * blockInfo.byteSize;
- size_t copyBlockRowsPerImage = copySize.height / blockInfo.height;
-
- if (texture->GetArrayLayers() > 1) {
- // TODO(jiawei.shao@intel.com): do a single copy when the data is
- // correctly packed.
- for (size_t copyZ = 0; copyZ < copyExtent.depthOrArrayLayers;
- ++copyZ) {
- uintptr_t offsetPerImage = static_cast<uintptr_t>(
- src.offset + copyZ * src.bytesPerRow * src.rowsPerImage);
- uint32_t dstOriginY = dst.origin.y;
- uint32_t dstOriginZ = dst.origin.z + copyZ;
-
- for (size_t copyBlockRow = 0;
- copyBlockRow < copyBlockRowsPerImage; ++copyBlockRow) {
- gl.CompressedTexSubImage3D(
- target, dst.mipLevel, dst.origin.x, dstOriginY,
- dstOriginZ, copyExtent.width, blockInfo.height, 1,
- format.internalFormat, copyDataSizePerBlockRow,
- reinterpret_cast<void*>(
- static_cast<uintptr_t>(offsetPerImage)));
-
- offsetPerImage += src.bytesPerRow;
- dstOriginY += blockInfo.height;
- }
- }
- } else {
- uintptr_t offset = static_cast<uintptr_t>(src.offset);
- uint32_t dstOriginY = dst.origin.y;
-
- // TODO(jiawei.shao@intel.com): do a single copy when the data is
- // correctly packed.
- for (size_t copyBlockRow = 0; copyBlockRow < copyBlockRowsPerImage;
- ++copyBlockRow) {
- gl.CompressedTexSubImage2D(
- target, dst.mipLevel, dst.origin.x, dstOriginY,
- copyExtent.width, blockInfo.height, format.internalFormat,
- copyDataSizePerBlockRow,
- reinterpret_cast<void*>(static_cast<uintptr_t>(offset)));
-
- offset += src.bytesPerRow;
- dstOriginY += blockInfo.height;
- }
- }
-
- } else {
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
- src.bytesPerRow / blockInfo.byteSize * blockInfo.width);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
- src.rowsPerImage * blockInfo.height);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, blockInfo.byteSize);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, blockInfo.width);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, blockInfo.height);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
-
- uint64_t copyDataSize = (copySize.width / blockInfo.width) *
- (copySize.height / blockInfo.height) *
- blockInfo.byteSize *
- copySize.depthOrArrayLayers;
-
- if (texture->GetArrayLayers() > 1) {
- gl.CompressedTexSubImage3D(
- target, dst.mipLevel, dst.origin.x, dst.origin.y, dst.origin.z,
- copyExtent.width, copyExtent.height,
- copyExtent.depthOrArrayLayers, format.internalFormat,
- copyDataSize,
- reinterpret_cast<void*>(static_cast<uintptr_t>(src.offset)));
- } else {
- gl.CompressedTexSubImage2D(
- target, dst.mipLevel, dst.origin.x, dst.origin.y,
- copyExtent.width, copyExtent.height, format.internalFormat,
- copyDataSize,
- reinterpret_cast<void*>(static_cast<uintptr_t>(src.offset)));
- }
-
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 0);
- }
- } else {
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
- src.bytesPerRow / blockInfo.byteSize * blockInfo.width);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, src.rowsPerImage * blockInfo.height);
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e2D:
- if (texture->GetArrayLayers() > 1) {
- gl.TexSubImage3D(target, dst.mipLevel, dst.origin.x,
- dst.origin.y, dst.origin.z, copySize.width,
- copySize.height, copySize.depthOrArrayLayers,
- format.format, format.type,
- reinterpret_cast<void*>(
- static_cast<uintptr_t>(src.offset)));
- } else {
- gl.TexSubImage2D(target, dst.mipLevel, dst.origin.x,
- dst.origin.y, copySize.width, copySize.height,
- format.format, format.type,
- reinterpret_cast<void*>(
- static_cast<uintptr_t>(src.offset)));
- }
- break;
-
- case wgpu::TextureDimension::e1D:
- case wgpu::TextureDimension::e3D:
- UNREACHABLE();
- }
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
- }
+ TextureDataLayout dataLayout;
+ dataLayout.offset = 0;
+ dataLayout.bytesPerRow = src.bytesPerRow;
+ dataLayout.rowsPerImage = src.rowsPerImage;
+ DoTexSubImage(gl, dst, reinterpret_cast<void*>(src.offset), dataLayout,
+ copy->copySize);
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
break;
}
@@ -785,7 +660,7 @@ namespace dawn_native { namespace opengl {
buffer->EnsureDataInitializedAsDestination(copy);
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
SubresourceRange subresources =
GetSubresourcesAffectedByCopy(src, copy->copySize);
texture->EnsureSubresourceContentInitialized(subresources);
@@ -840,23 +715,25 @@ namespace dawn_native { namespace opengl {
copySize.height, glFormat, glType, offset);
break;
}
+ // The implementation for 2D array textures is the same as for 3D.
+ DAWN_FALLTHROUGH;
+ }
+ case wgpu::TextureDimension::e3D: {
const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
- for (uint32_t layer = 0; layer < copySize.depthOrArrayLayers; ++layer) {
+ for (uint32_t z = 0; z < copySize.depthOrArrayLayers; ++z) {
gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
texture->GetHandle(), src.mipLevel,
- src.origin.z + layer);
+ src.origin.z + z);
gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
copySize.height, glFormat, glType, offset);
offset += bytesPerImage;
}
-
break;
}
case wgpu::TextureDimension::e1D:
- case wgpu::TextureDimension::e3D:
UNREACHABLE();
}
@@ -1354,8 +1231,8 @@ namespace dawn_native { namespace opengl {
break;
}
- case Command::SetBlendColor: {
- SetBlendColorCmd* cmd = mCommands.NextCommand<SetBlendColorCmd>();
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
const std::array<float, 4> blendColor = ConvertToFloatColor(cmd->color);
gl.BlendColor(blendColor[0], blendColor[1], blendColor[2], blendColor[3]);
break;
@@ -1397,4 +1274,138 @@ namespace dawn_native { namespace opengl {
UNREACHABLE();
}
+ void DoTexSubImage(const OpenGLFunctions& gl,
+ const TextureCopy& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& copySize) {
+ Texture* texture = ToBackend(destination.texture.Get());
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+
+ const GLFormat& format = texture->GetGLFormat();
+ GLenum target = texture->GetGLTarget();
+ data = static_cast<const uint8_t*>(data) + dataLayout.offset;
+ gl.ActiveTexture(GL_TEXTURE0);
+ gl.BindTexture(target, texture->GetHandle());
+ const TexelBlockInfo& blockInfo =
+ texture->GetFormat().GetAspectInfo(destination.aspect).block;
+
+ uint32_t x = destination.origin.x;
+ uint32_t y = destination.origin.y;
+ uint32_t z = destination.origin.z;
+ if (texture->GetFormat().isCompressed) {
+ size_t rowSize = copySize.width / blockInfo.width * blockInfo.byteSize;
+ Extent3D virtSize = texture->GetMipLevelVirtualSize(destination.mipLevel);
+ uint32_t width = std::min(copySize.width, virtSize.width - x);
+
+ // In GLES glPixelStorei() doesn't affect CompressedTexSubImage*D() and
+ // GL_UNPACK_COMPRESSED_BLOCK_* isn't defined, so we have to work around
+ // this limitation by copying the compressed texture data once per row.
+ // See OpenGL ES 3.2 SPEC Chapter 8.4.1, "Pixel Storage Modes and Pixel
+ // Buffer Objects" for more details. For Desktop GL, we use row-by-row
+ // copies only for uploads where bytesPerRow is not a multiple of byteSize.
+ if (dataLayout.bytesPerRow % blockInfo.byteSize == 0 && gl.GetVersion().IsDesktop()) {
+ size_t imageSize =
+ rowSize * (copySize.height / blockInfo.height) * copySize.depthOrArrayLayers;
+
+ uint32_t height = std::min(copySize.height, virtSize.height - y);
+
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+ dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, blockInfo.byteSize);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, blockInfo.width);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, blockInfo.height);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
+
+ if (texture->GetArrayLayers() == 1 &&
+ texture->GetDimension() == wgpu::TextureDimension::e2D) {
+ gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
+ format.internalFormat, imageSize, data);
+ } else {
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
+ dataLayout.rowsPerImage * blockInfo.height);
+ gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+ copySize.depthOrArrayLayers, format.internalFormat,
+ imageSize, data);
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+ }
+
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, 0);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 0);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 0);
+ gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 0);
+ } else {
+ if (texture->GetArrayLayers() == 1 &&
+ texture->GetDimension() == wgpu::TextureDimension::e2D) {
+ const uint8_t* d = static_cast<const uint8_t*>(data);
+
+ for (; y < destination.origin.y + copySize.height; y += blockInfo.height) {
+ uint32_t height = std::min(blockInfo.height, virtSize.height - y);
+ gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width,
+ height, format.internalFormat, rowSize, d);
+ d += dataLayout.bytesPerRow;
+ }
+ } else {
+ const uint8_t* slice = static_cast<const uint8_t*>(data);
+
+ for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+ const uint8_t* d = slice;
+
+ for (y = destination.origin.y; y < destination.origin.y + copySize.height;
+ y += blockInfo.height) {
+ uint32_t height = std::min(blockInfo.height, virtSize.height - y);
+ gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width,
+ height, 1, format.internalFormat, rowSize,
+ d);
+ d += dataLayout.bytesPerRow;
+ }
+
+ slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
+ }
+ }
+ }
+ } else {
+ uint32_t width = copySize.width;
+ uint32_t height = copySize.height;
+ if (dataLayout.bytesPerRow % blockInfo.byteSize == 0) {
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+ dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+ if (texture->GetArrayLayers() == 1 &&
+ texture->GetDimension() == wgpu::TextureDimension::e2D) {
+ gl.TexSubImage2D(target, destination.mipLevel, x, y, width, height,
+ format.format, format.type, data);
+ } else {
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
+ dataLayout.rowsPerImage * blockInfo.height);
+ gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+ copySize.depthOrArrayLayers, format.format, format.type, data);
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+ }
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ } else {
+ if (texture->GetArrayLayers() == 1 &&
+ texture->GetDimension() == wgpu::TextureDimension::e2D) {
+ const uint8_t* d = static_cast<const uint8_t*>(data);
+ for (; y < destination.origin.y + height; ++y) {
+ gl.TexSubImage2D(target, destination.mipLevel, x, y, width, 1,
+ format.format, format.type, d);
+ d += dataLayout.bytesPerRow;
+ }
+ } else {
+ const uint8_t* slice = static_cast<const uint8_t*>(data);
+ for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+ const uint8_t* d = slice;
+ for (y = destination.origin.y; y < destination.origin.y + height; ++y) {
+ gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, 1, 1,
+ format.format, format.type, d);
+ d += dataLayout.bytesPerRow;
+ }
+ slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
+ }
+ }
+ }
+ }
+ }
+
}} // namespace dawn_native::opengl
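
On GLES, and whenever bytesPerRow is not a multiple of the block byte size, DoTexSubImage falls back to one compressed upload per block row because the GL_UNPACK_* pixel-store state does not apply to compressed uploads there. Below is a trimmed sketch of just that row loop for the 2D case; uploadRow is a hypothetical stand-in for gl.CompressedTexSubImage2D, and clamping to the mip's virtual size is omitted.

#include <cstddef>
#include <cstdint>
#include <functional>

struct BlockInfo { uint32_t width; uint32_t height; uint32_t byteSize; };

// One CompressedTexSubImage2D-style call per block row; `uploadRow` stands in
// for the real GL call with the target, mip level, and format already chosen.
void UploadCompressedRowByRow(
    uint32_t originX, uint32_t originY,
    uint32_t copyWidth, uint32_t copyHeight,
    uint32_t bytesPerRow, const BlockInfo& block, const uint8_t* data,
    const std::function<void(uint32_t, uint32_t, uint32_t, uint32_t,
                             size_t, const uint8_t*)>& uploadRow) {
    const size_t rowSize = size_t{copyWidth / block.width} * block.byteSize;
    const uint8_t* row = data;
    for (uint32_t y = originY; y < originY + copyHeight; y += block.height) {
        uploadRow(originX, y, copyWidth, block.height, rowSize, row);
        row += bytesPerRow;  // Source rows may be padded beyond rowSize.
    }
}
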
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
index c21f5746687..fde8751ef5a 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
@@ -24,6 +24,7 @@ namespace dawn_native {
namespace dawn_native { namespace opengl {
class Device;
+ struct OpenGLFunctions;
class CommandBuffer final : public CommandBufferBase {
public:
@@ -36,6 +37,13 @@ namespace dawn_native { namespace opengl {
MaybeError ExecuteRenderPass(BeginRenderPassCmd* renderPass);
};
+ // Like glTexSubImage*, the "data" argument is either a pointer to image data or
+ // an offset if a PBO is bound.
+ void DoTexSubImage(const OpenGLFunctions& gl,
+ const TextureCopy& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& copySize);
}} // namespace dawn_native::opengl
#endif // DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
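For readers unfamiliar with the pixel-unpack-buffer convention the DoTexSubImage comment refers to: when a buffer is bound to GL_PIXEL_UNPACK_BUFFER, the final pointer argument of glTexSubImage2D is interpreted as a byte offset into that buffer rather than a client-memory pointer. A minimal standalone sketch (plain GL rather than Dawn's OpenGLFunctions table; the texture and PBO handles are assumed to already exist and a context is assumed to be current):

    #include <cstdint>
    #include <vector>
    // Assumes a GL loader header (e.g. <GLES3/gl3.h>) has been included.

    void UploadRGBA8(GLuint tex, GLuint pbo, GLsizei w, GLsizei h) {
        glBindTexture(GL_TEXTURE_2D, tex);

        // Case 1: no PBO bound -> "data" is a client-memory pointer.
        std::vector<uint8_t> pixels(static_cast<size_t>(w) * h * 4, 0xFF);
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());

        // Case 2: PBO bound -> the same argument is a byte offset into the PBO.
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE,
                        reinterpret_cast<const void*>(uintptr_t(0)));
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    }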
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
index a0181efafee..01100254b4b 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
@@ -119,125 +119,58 @@ namespace dawn_native { namespace opengl {
}
}
- // The uniforms are part of the program state so we can pre-bind buffer units, texture units
- // etc.
+ // Compute links between stages for combined samplers, then bind them to texture units
gl.UseProgram(mProgram);
const auto& indices = layout->GetBindingIndexInfo();
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(group);
-
- for (const auto& it : bgl->GetBindingMap()) {
- BindingNumber bindingNumber = it.first;
- BindingIndex bindingIndex = it.second;
-
- std::string name = GetBindingName(group, bindingNumber);
- const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform: {
- GLint location = gl.GetUniformBlockIndex(mProgram, name.c_str());
- if (location != -1) {
- gl.UniformBlockBinding(mProgram, location,
- indices[group][bindingIndex]);
- }
- break;
- }
- case wgpu::BufferBindingType::Storage:
- case wgpu::BufferBindingType::ReadOnlyStorage: {
- // Since glShaderStorageBlockBinding doesn't exist in OpenGL ES, we
- // skip that call and handle it during shader translation by
- // modifying the location decoration. Contrary to all other binding
- // types, OpenGL ES's SSBO binding index in the SSBO table is the
- // value of the location= decoration in GLSL.
- if (gl.GetVersion().IsDesktop()) {
- GLuint location = gl.GetProgramResourceIndex(
- mProgram, GL_SHADER_STORAGE_BLOCK, name.c_str());
- if (location != GL_INVALID_INDEX) {
- gl.ShaderStorageBlockBinding(mProgram, location,
- indices[group][bindingIndex]);
- }
- }
- break;
- }
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- break;
-
- case BindingInfoType::Sampler:
- case BindingInfoType::Texture:
- // These binding types are handled in the separate sampler and texture
- // emulation
- break;
-
- case BindingInfoType::StorageTexture: {
- if (gl.GetVersion().IsDesktop()) {
- GLint location = gl.GetUniformLocation(mProgram, name.c_str());
- if (location != -1) {
- gl.Uniform1i(location, indices[group][bindingIndex]);
- }
- }
- break;
- }
- }
+ std::set<CombinedSampler> combinedSamplersSet;
+ for (SingleShaderStage stage : IterateStages(activeStages)) {
+ for (const CombinedSampler& combined : combinedSamplers[stage]) {
+ combinedSamplersSet.insert(combined);
}
}
- // Compute links between stages for combined samplers, then bind them to texture units
- {
- std::set<CombinedSampler> combinedSamplersSet;
- for (SingleShaderStage stage : IterateStages(activeStages)) {
- for (const CombinedSampler& combined : combinedSamplers[stage]) {
- combinedSamplersSet.insert(combined);
- }
- }
+ mUnitsForSamplers.resize(layout->GetNumSamplers());
+ mUnitsForTextures.resize(layout->GetNumSampledTextures());
+
+ GLuint textureUnit = layout->GetTextureUnitsUsed();
+ for (const auto& combined : combinedSamplersSet) {
+ const std::string& name = combined.GetName();
+ GLint location = gl.GetUniformLocation(mProgram, name.c_str());
- mUnitsForSamplers.resize(layout->GetNumSamplers());
- mUnitsForTextures.resize(layout->GetNumSampledTextures());
+ if (location == -1) {
+ continue;
+ }
- GLuint textureUnit = layout->GetTextureUnitsUsed();
- for (const auto& combined : combinedSamplersSet) {
- std::string name = combined.GetName();
- GLint location = gl.GetUniformLocation(mProgram, name.c_str());
+ gl.Uniform1i(location, textureUnit);
- if (location == -1) {
- continue;
- }
+ bool shouldUseFiltering;
+ {
+ const BindGroupLayoutBase* bgl =
+ layout->GetBindGroupLayout(combined.textureLocation.group);
+ BindingIndex bindingIndex = bgl->GetBindingIndex(combined.textureLocation.binding);
- gl.Uniform1i(location, textureUnit);
+ GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
+ mUnitsForTextures[textureIndex].push_back(textureUnit);
- bool shouldUseFiltering;
- {
+ shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
+ wgpu::TextureSampleType::Float;
+ }
+ {
+ if (combined.useDummySampler) {
+ mDummySamplerUnits.push_back(textureUnit);
+ } else {
const BindGroupLayoutBase* bgl =
- layout->GetBindGroupLayout(combined.textureLocation.group);
+ layout->GetBindGroupLayout(combined.samplerLocation.group);
BindingIndex bindingIndex =
- bgl->GetBindingIndex(combined.textureLocation.binding);
-
- GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
- mUnitsForTextures[textureIndex].push_back(textureUnit);
+ bgl->GetBindingIndex(combined.samplerLocation.binding);
- shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
- wgpu::TextureSampleType::Float;
+ GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
+ mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
}
- {
- if (combined.useDummySampler) {
- mDummySamplerUnits.push_back(textureUnit);
- } else {
- const BindGroupLayoutBase* bgl =
- layout->GetBindGroupLayout(combined.samplerLocation.group);
- BindingIndex bindingIndex =
- bgl->GetBindingIndex(combined.samplerLocation.binding);
-
- GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
- mUnitsForSamplers[samplerIndex].push_back(
- {textureUnit, shouldUseFiltering});
- }
- }
-
- textureUnit++;
}
+
+ textureUnit++;
}
}
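The rewritten PipelineGL code assigns one texture unit per combined texture/sampler pair and points the corresponding GLSL sampler uniform at that unit with Uniform1i. A simplified sketch of that assignment loop, assuming only a std::set of combined-sampler uniform names (Dawn's CombinedSampler and layout bookkeeping are omitted):

    #include <set>
    #include <string>
    #include <vector>

    // Assigns consecutive texture units, starting at firstUnit, to each
    // combined-sampler uniform the linked program actually kept.
    std::vector<GLuint> AssignTextureUnits(GLuint program,
                                           const std::set<std::string>& combinedNames,
                                           GLuint firstUnit) {
        std::vector<GLuint> usedUnits;
        GLuint unit = firstUnit;
        glUseProgram(program);
        for (const std::string& name : combinedNames) {
            GLint location = glGetUniformLocation(program, name.c_str());
            if (location == -1) {
                continue;  // The uniform was optimized out by the GL compiler.
            }
            glUniform1i(location, static_cast<GLint>(unit));  // Sampler uniforms take unit indices.
            usedUnits.push_back(unit);
            unit++;
        }
        return usedUnits;
    }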
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
index 17260b925e1..9a6335edacf 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
@@ -56,101 +56,21 @@ namespace dawn_native { namespace opengl {
const void* data,
const TextureDataLayout& dataLayout,
const Extent3D& writeSizePixel) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- Texture* texture = ToBackend(destination.texture);
- SubresourceRange range(Aspect::Color,
- {destination.origin.z, writeSizePixel.depthOrArrayLayers},
- {destination.mipLevel, 1});
- if (IsCompleteSubresourceCopiedTo(texture, writeSizePixel, destination.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- texture->EnsureSubresourceContentInitialized(range);
- }
-
- const GLFormat& format = texture->GetGLFormat();
- GLenum target = texture->GetGLTarget();
- data = static_cast<const uint8_t*>(data) + dataLayout.offset;
- gl.BindTexture(target, texture->GetHandle());
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(destination.aspect).block;
-
- if (texture->GetFormat().isCompressed) {
- size_t imageSize = writeSizePixel.width / blockInfo.width * blockInfo.byteSize;
- Extent3D virtSize = texture->GetMipLevelVirtualSize(destination.mipLevel);
- uint32_t width = std::min(writeSizePixel.width, virtSize.width - destination.origin.x);
- uint32_t x = destination.origin.x;
-
- // For now, we use row-by-row texture uploads of compressed textures in all cases.
- // TODO(crbug.com/dawn/684): For contiguous cases, we should be able to use a single
- // texture upload per layer, as we do in the non-compressed case.
- if (texture->GetArrayLayers() == 1) {
- const uint8_t* d = static_cast<const uint8_t*>(data);
-
- for (uint32_t y = destination.origin.y;
- y < destination.origin.y + writeSizePixel.height; y += blockInfo.height) {
- uint32_t height = std::min(blockInfo.height, virtSize.height - y);
- gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
- format.internalFormat, imageSize, d);
- d += dataLayout.bytesPerRow;
- }
- } else {
- const uint8_t* slice = static_cast<const uint8_t*>(data);
-
- for (uint32_t z = destination.origin.z;
- z < destination.origin.z + writeSizePixel.depthOrArrayLayers; ++z) {
- const uint8_t* d = slice;
-
- for (uint32_t y = destination.origin.y;
- y < destination.origin.y + writeSizePixel.height; y += blockInfo.height) {
- uint32_t height = std::min(blockInfo.height, virtSize.height - y);
- gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width,
- height, 1, format.internalFormat, imageSize, d);
- d += dataLayout.bytesPerRow;
- }
-
- slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
- }
- }
- } else if (dataLayout.bytesPerRow % blockInfo.byteSize == 0) {
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
- dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
- if (texture->GetArrayLayers() == 1) {
- gl.TexSubImage2D(target, destination.mipLevel, destination.origin.x,
- destination.origin.y, writeSizePixel.width, writeSizePixel.height,
- format.format, format.type, data);
- } else {
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, dataLayout.rowsPerImage * blockInfo.height);
- gl.TexSubImage3D(target, destination.mipLevel, destination.origin.x,
- destination.origin.y, destination.origin.z, writeSizePixel.width,
- writeSizePixel.height, writeSizePixel.depthOrArrayLayers,
- format.format, format.type, data);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
- }
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ TextureCopy textureCopy;
+ textureCopy.texture = destination.texture;
+ textureCopy.mipLevel = destination.mipLevel;
+ textureCopy.origin = destination.origin;
+ textureCopy.aspect =
+ SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);
+
+ SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
+ if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel,
+ destination.mipLevel)) {
+ destination.texture->SetIsSubresourceContentInitialized(true, range);
} else {
- if (texture->GetArrayLayers() == 1) {
- const uint8_t* d = static_cast<const uint8_t*>(data);
- for (uint32_t y = 0; y < writeSizePixel.height; ++y) {
- gl.TexSubImage2D(target, destination.mipLevel, destination.origin.x,
- destination.origin.y + y, writeSizePixel.width, 1,
- format.format, format.type, d);
- d += dataLayout.bytesPerRow;
- }
- } else {
- const uint8_t* slice = static_cast<const uint8_t*>(data);
- for (uint32_t z = 0; z < writeSizePixel.depthOrArrayLayers; ++z) {
- const uint8_t* d = slice;
- for (uint32_t y = 0; y < writeSizePixel.height; ++y) {
- gl.TexSubImage3D(target, destination.mipLevel, destination.origin.x,
- destination.origin.y + y, destination.origin.z + z,
- writeSizePixel.width, 1, 1, format.format, format.type, d);
- d += dataLayout.bytesPerRow;
- }
- slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
- }
- }
+ ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
}
+ DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
return {};
}
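The deleted QueueGL path and the shared DoTexSubImage it now delegates to hinge on the same rule: GL_UNPACK_ROW_LENGTH is expressed in pixels, so a WebGPU bytesPerRow can only be expressed directly when it is a multiple of the texel size; otherwise the upload falls back to one glTexSubImage2D call per row. A hedged sketch of that decision for a tightly specified RGBA8 2D upload:

    #include <cstdint>

    // bytesPerRow is the stride between rows in the source data; texelSize is the
    // size of one pixel (4 for RGBA8). Assumes the texture is already bound.
    void UploadWithStride(const uint8_t* data, uint32_t width, uint32_t height,
                          uint32_t bytesPerRow, uint32_t texelSize) {
        if (bytesPerRow % texelSize == 0) {
            // Express the stride in pixels and upload the whole image at once.
            glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(bytesPerRow / texelSize));
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
            glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
        } else {
            // The stride is not a whole number of pixels: upload row by row.
            for (uint32_t y = 0; y < height; ++y) {
                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, y, width, 1, GL_RGBA, GL_UNSIGNED_BYTE,
                                data + static_cast<size_t>(y) * bytesPerRow);
            }
        }
    }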
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
index 3c153fb43ee..2511786c3c2 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
@@ -62,17 +62,17 @@ namespace dawn_native { namespace opengl {
return GL_ZERO;
case wgpu::BlendFactor::One:
return GL_ONE;
- case wgpu::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::Src:
return GL_SRC_COLOR;
- case wgpu::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::OneMinusSrc:
return GL_ONE_MINUS_SRC_COLOR;
case wgpu::BlendFactor::SrcAlpha:
return GL_SRC_ALPHA;
case wgpu::BlendFactor::OneMinusSrcAlpha:
return GL_ONE_MINUS_SRC_ALPHA;
- case wgpu::BlendFactor::DstColor:
+ case wgpu::BlendFactor::Dst:
return GL_DST_COLOR;
- case wgpu::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::OneMinusDst:
return GL_ONE_MINUS_DST_COLOR;
case wgpu::BlendFactor::DstAlpha:
return GL_DST_ALPHA;
@@ -80,10 +80,19 @@ namespace dawn_native { namespace opengl {
return GL_ONE_MINUS_DST_ALPHA;
case wgpu::BlendFactor::SrcAlphaSaturated:
return GL_SRC_ALPHA_SATURATE;
- case wgpu::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::Constant:
return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
- case wgpu::BlendFactor::OneMinusBlendColor:
+ case wgpu::BlendFactor::OneMinusConstant:
return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
+
+ // Deprecated blend factors should be normalized prior to this call.
+ case wgpu::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::DstColor:
+ case wgpu::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::OneMinusBlendColor:
+ UNREACHABLE();
}
}
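The new UNREACHABLE() arms rely on the frontend rewriting the deprecated enumerators before backend translation runs. A hypothetical normalization helper showing the intended one-to-one mapping (illustrative only, not the actual frontend code; assumes the wgpu::BlendFactor enum from the WebGPU C++ headers):

    wgpu::BlendFactor NormalizeBlendFactor(wgpu::BlendFactor factor) {
        switch (factor) {
            // Deprecated aliases map onto the renamed factors.
            case wgpu::BlendFactor::SrcColor:
                return wgpu::BlendFactor::Src;
            case wgpu::BlendFactor::OneMinusSrcColor:
                return wgpu::BlendFactor::OneMinusSrc;
            case wgpu::BlendFactor::DstColor:
                return wgpu::BlendFactor::Dst;
            case wgpu::BlendFactor::OneMinusDstColor:
                return wgpu::BlendFactor::OneMinusDst;
            case wgpu::BlendFactor::BlendColor:
                return wgpu::BlendFactor::Constant;
            case wgpu::BlendFactor::OneMinusBlendColor:
                return wgpu::BlendFactor::OneMinusConstant;
            default:
                return factor;  // Already a non-deprecated value.
        }
    }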
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
index cc225491bd5..b147626cbf3 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
@@ -84,7 +84,17 @@ namespace dawn_native { namespace opengl {
        // Tint currently does not support emitting GLSL, so when provided a Tint program we need
        // to generate SPIRV and SPIRV-Cross reflection data to be used in this backend.
if (GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
- tint::writer::spirv::Generator generator(GetTintProgram());
+ tint::transform::Manager transformManager;
+ transformManager.append(std::make_unique<tint::transform::Spirv>());
+
+ tint::transform::DataMap transformInputs;
+
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program,
+ RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+ nullptr, GetCompilationMessages()));
+
+ tint::writer::spirv::Generator generator(&program);
if (!generator.Generate()) {
std::ostringstream errorStream;
errorStream << "Generator: " << generator.error() << std::endl;
@@ -169,38 +179,41 @@ namespace dawn_native { namespace opengl {
// Change binding names to be "dawn_binding_<group>_<binding>".
// Also unsets the SPIRV "Binding" decoration as it outputs "layout(binding=)" which
// isn't supported on OSX's OpenGL.
- for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
+ const PipelineLayout::BindingIndexInfo& indices = layout->GetBindingIndexInfo();
+
+ // Modify the decoration of variables so that SPIRV-Cross outputs only
+ // layout(binding=<index>) for interface variables.
+ //
+ // When the use_tint_generator toggle is on, Tint is used for the reflection of bindings
+ // for the implicit pipeline layout and pipeline/layout validation, but bindingInfo is set
+        // to mGLEntryPoints, which is the SPIRV-Cross reflection. Tint reflects used bindings more
+        // precisely than SPIRV-Cross, so some bindings in bindingInfo might not exist in the layout
+ // and querying the layout for them would cause an ASSERT. That's why we defensively check
+ // that bindings are in the layout before modifying them. This slight hack is ok because in
+ // the long term we will use Tint to produce GLSL.
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
for (const auto& it : bindingInfo[group]) {
+ const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(group);
BindingNumber bindingNumber = it.first;
const auto& info = it.second;
- uint32_t resourceId;
- switch (info.bindingType) {
- // When the resource is a uniform or shader storage block, we should change the
- // block name instead of the instance name.
- case BindingInfoType::Buffer:
- resourceId = info.base_type_id;
- break;
- default:
- resourceId = info.id;
- break;
+ if (!bgl->HasBinding(bindingNumber)) {
+ continue;
}
- compiler.set_name(resourceId, GetBindingName(group, bindingNumber));
+ // Remove the name of the base type. This works around an issue where if the SPIRV
+ // has two uniform/storage interface variables that point to the same base type,
+ // then SPIRV-Cross would emit two bindings with type names that conflict:
+ //
+ // layout(binding=0) uniform Buf {...} binding0;
+ // layout(binding=1) uniform Buf {...} binding1;
+ compiler.set_name(info.base_type_id, "");
+
+ BindingIndex bindingIndex = bgl->GetBindingIndex(bindingNumber);
+
compiler.unset_decoration(info.id, spv::DecorationDescriptorSet);
- // OpenGL ES has no glShaderStorageBlockBinding call, so we adjust the SSBO binding
- // decoration here instead.
- if (version.IsES() && info.bindingType == BindingInfoType::Buffer &&
- (info.buffer.type == wgpu::BufferBindingType::Storage ||
- info.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage)) {
- const auto& indices = layout->GetBindingIndexInfo();
- BindingIndex bindingIndex =
- layout->GetBindGroupLayout(group)->GetBindingIndex(bindingNumber);
- compiler.set_decoration(info.id, spv::DecorationBinding,
- indices[group][bindingIndex]);
- } else {
- compiler.unset_decoration(info.id, spv::DecorationBinding);
- }
+ compiler.set_decoration(info.id, spv::DecorationBinding,
+ indices[group][bindingIndex]);
}
}
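The ShaderModuleGL change uses the layout's flattened index as the sole layout(binding=N) value and drops the descriptor-set decoration, since desktop GL has no notion of descriptor sets. A condensed sketch against the raw SPIRV-Cross API (the flatIndex callback is a stand-in for Dawn's PipelineLayout binding-index table, and only uniform buffers are shown):

    #include <spirv_cross.hpp>
    #include <functional>

    // flatIndex maps a (descriptor set, binding) pair onto a single GL binding slot.
    void FlattenBindings(spirv_cross::Compiler& compiler,
                         const std::function<uint32_t(uint32_t, uint32_t)>& flatIndex) {
        spirv_cross::ShaderResources resources = compiler.get_shader_resources();
        for (const spirv_cross::Resource& res : resources.uniform_buffers) {
            uint32_t set = compiler.get_decoration(res.id, spv::DecorationDescriptorSet);
            uint32_t binding = compiler.get_decoration(res.id, spv::DecorationBinding);

            // Erase the block type name so two blocks sharing a base type don't collide.
            compiler.set_name(res.base_type_id, "");

            compiler.unset_decoration(res.id, spv::DecorationDescriptorSet);
            compiler.set_decoration(res.id, spv::DecorationBinding, flatIndex(set, binding));
        }
    }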
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
index c7fc9ef74b7..8b97064ea79 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
@@ -19,6 +19,7 @@
#include "common/Math.h"
#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/opengl/BufferGL.h"
+#include "dawn_native/opengl/CommandBufferGL.h"
#include "dawn_native/opengl/DeviceGL.h"
#include "dawn_native/opengl/UtilsGL.h"
@@ -247,6 +248,17 @@ namespace dawn_native { namespace opengl {
gl.GenFramebuffers(1, &framebuffer);
gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+ GLenum attachment;
+ if (range.aspects == (Aspect::Depth | Aspect::Stencil)) {
+ attachment = GL_DEPTH_STENCIL_ATTACHMENT;
+ } else if (range.aspects == Aspect::Depth) {
+ attachment = GL_DEPTH_ATTACHMENT;
+ } else if (range.aspects == Aspect::Stencil) {
+ attachment = GL_STENCIL_ATTACHMENT;
+ } else {
+ UNREACHABLE();
+ }
+
for (uint32_t level = range.baseMipLevel;
level < range.baseMipLevel + range.levelCount; ++level) {
switch (GetDimension()) {
@@ -268,9 +280,9 @@ namespace dawn_native { namespace opengl {
continue;
}
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER,
- GL_DEPTH_STENCIL_ATTACHMENT, GetGLTarget(),
- GetHandle(), static_cast<GLint>(level));
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
+ GetGLTarget(), GetHandle(),
+ static_cast<GLint>(level));
DoClear(aspectsToClear);
} else {
for (uint32_t layer = range.baseArrayLayer;
@@ -292,9 +304,8 @@ namespace dawn_native { namespace opengl {
}
gl.FramebufferTextureLayer(
- GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- GetHandle(), static_cast<GLint>(level),
- static_cast<GLint>(layer));
+ GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
+ static_cast<GLint>(level), static_cast<GLint>(layer));
DoClear(aspectsToClear);
}
}
@@ -310,13 +321,30 @@ namespace dawn_native { namespace opengl {
} else {
ASSERT(range.aspects == Aspect::Color);
+ // For gl.ClearBufferiv/uiv calls
+ constexpr std::array<GLuint, 4> kClearColorDataUint0 = {0u, 0u, 0u, 0u};
+ constexpr std::array<GLuint, 4> kClearColorDataUint1 = {1u, 1u, 1u, 1u};
+ std::array<GLuint, 4> clearColorData;
+ clearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0u : 1u);
+
+ // For gl.ClearBufferfv calls
+ constexpr std::array<GLfloat, 4> kClearColorDataFloat0 = {0.f, 0.f, 0.f, 0.f};
+ constexpr std::array<GLfloat, 4> kClearColorDataFloat1 = {1.f, 1.f, 1.f, 1.f};
+ std::array<GLfloat, 4> fClearColorData;
+ fClearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f);
+
static constexpr uint32_t MAX_TEXEL_SIZE = 16;
const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
ASSERT(blockInfo.byteSize <= MAX_TEXEL_SIZE);
- std::array<GLbyte, MAX_TEXEL_SIZE> clearColorData;
- clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 255;
- clearColorData.fill(clearColor);
+ // For gl.ClearTexSubImage calls
+ constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes0 = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes255 = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+
+ wgpu::TextureComponentType baseType =
+ GetFormat().GetAspectInfo(Aspect::Color).baseType;
const GLFormat& glFormat = GetGLFormat();
for (uint32_t level = range.baseMipLevel;
@@ -333,26 +361,84 @@ namespace dawn_native { namespace opengl {
if (gl.IsAtLeastGL(4, 4)) {
gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
static_cast<GLint>(layer), mipSize.width,
- mipSize.height, 1, glFormat.format, glFormat.type,
- clearColorData.data());
- } else {
- GLuint framebuffer = 0;
- gl.GenFramebuffers(1, &framebuffer);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
- if (GetArrayLayers() == 1) {
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- GetGLTarget(), GetHandle(), level);
- } else {
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER,
- GL_COLOR_ATTACHMENT0, GetHandle(), level,
- layer);
+ mipSize.height, mipSize.depthOrArrayLayers,
+ glFormat.format, glFormat.type,
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataBytes0.data()
+ : kClearColorDataBytes255.data());
+ continue;
+ }
+
+ GLuint framebuffer = 0;
+ gl.GenFramebuffers(1, &framebuffer);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+
+ GLenum attachment = GL_COLOR_ATTACHMENT0;
+ gl.DrawBuffers(1, &attachment);
+
+ gl.Disable(GL_SCISSOR_TEST);
+ gl.ColorMask(true, true, true, true);
+
+ auto DoClear = [&]() {
+ switch (baseType) {
+ case wgpu::TextureComponentType::Float: {
+ gl.ClearBufferfv(GL_COLOR, 0,
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataFloat0.data()
+ : kClearColorDataFloat1.data());
+ break;
+ }
+ case wgpu::TextureComponentType::Uint: {
+ gl.ClearBufferuiv(GL_COLOR, 0,
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataUint0.data()
+ : kClearColorDataUint1.data());
+ break;
+ }
+ case wgpu::TextureComponentType::Sint: {
+ gl.ClearBufferiv(GL_COLOR, 0,
+ reinterpret_cast<const GLint*>(
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataUint0.data()
+ : kClearColorDataUint1.data()));
+ break;
+ }
+
+ case wgpu::TextureComponentType::DepthComparison:
+ UNREACHABLE();
+ }
+ };
+
+ if (GetArrayLayers() == 1) {
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
+ case wgpu::TextureDimension::e2D:
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
+ GetGLTarget(), GetHandle(), level);
+ DoClear();
+ break;
+ case wgpu::TextureDimension::e3D:
+ uint32_t depth =
+ GetMipLevelVirtualSize(level).depthOrArrayLayers;
+ for (GLint z = 0; z < static_cast<GLint>(depth); ++z) {
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
+ GetHandle(), level, z);
+ DoClear();
+ }
+ break;
}
- gl.Disable(GL_SCISSOR_TEST);
- gl.ClearBufferiv(GL_COLOR, 0,
- reinterpret_cast<const GLint*>(clearColorData.data()));
- gl.Enable(GL_SCISSOR_TEST);
- gl.DeleteFramebuffers(1, &framebuffer);
+
+ } else {
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
+ level, layer);
+ DoClear();
}
+
+ gl.Enable(GL_SCISSOR_TEST);
+ gl.DeleteFramebuffers(1, &framebuffer);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
}
}
}
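When glClearTexSubImage is unavailable (pre-GL 4.4 and GLES), the texture is attached to a scratch framebuffer and cleared with the glClearBuffer* variant matching the format's component type, since clearing an integer attachment with float data (and vice versa) is not well defined. A standalone sketch of that dispatch; the ComponentType enum is illustrative, and color attachment 0 of the currently bound draw framebuffer is assumed:

    enum class ComponentType { Float, Uint, Sint };

    // Clears color attachment 0 to zero using the ClearBuffer entry point that
    // matches the attachment's component type.
    void ClearColorAttachment0(ComponentType type) {
        switch (type) {
            case ComponentType::Float: {
                const GLfloat zerosF[4] = {0.f, 0.f, 0.f, 0.f};
                glClearBufferfv(GL_COLOR, 0, zerosF);
                break;
            }
            case ComponentType::Uint: {
                const GLuint zerosU[4] = {0u, 0u, 0u, 0u};
                glClearBufferuiv(GL_COLOR, 0, zerosU);
                break;
            }
            case ComponentType::Sint: {
                const GLint zerosI[4] = {0, 0, 0, 0};
                glClearBufferiv(GL_COLOR, 0, zerosI);
                break;
            }
        }
    }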
@@ -362,20 +448,27 @@ namespace dawn_native { namespace opengl {
// create temp buffer with clear color to copy to the texture image
const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
ASSERT(kTextureBytesPerRowAlignment % blockInfo.byteSize == 0);
- uint32_t bytesPerRow = Align((GetWidth() / blockInfo.width) * blockInfo.byteSize,
- kTextureBytesPerRowAlignment);
+
+ Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+ uint32_t bytesPerRow =
+ Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 4);
// Make sure that we are not rounding
ASSERT(bytesPerRow % blockInfo.byteSize == 0);
- ASSERT(GetHeight() % blockInfo.height == 0);
+ ASSERT(largestMipSize.height % blockInfo.height == 0);
+
+ uint64_t bufferSize64 = static_cast<uint64_t>(bytesPerRow) *
+ (largestMipSize.height / blockInfo.height) *
+ largestMipSize.depthOrArrayLayers;
+ if (bufferSize64 > std::numeric_limits<size_t>::max()) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+ }
+ size_t bufferSize = static_cast<size_t>(bufferSize64);
dawn_native::BufferDescriptor descriptor = {};
descriptor.mappedAtCreation = true;
descriptor.usage = wgpu::BufferUsage::CopySrc;
- descriptor.size = bytesPerRow * (GetHeight() / blockInfo.height);
- if (descriptor.size > std::numeric_limits<uint32_t>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
- }
+ descriptor.size = bufferSize;
// We don't count the lazy clear of srcBuffer because it is an internal buffer.
// TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
@@ -383,57 +476,38 @@ namespace dawn_native { namespace opengl {
DAWN_TRY_ASSIGN(srcBuffer, Buffer::CreateInternalBuffer(device, &descriptor, false));
// Fill the buffer with clear color
- memset(srcBuffer->GetMappedRange(0, descriptor.size), clearColor, descriptor.size);
+ memset(srcBuffer->GetMappedRange(0, bufferSize), clearColor, bufferSize);
srcBuffer->Unmap();
- // Bind buffer and texture, and make the buffer to texture copy
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
- (bytesPerRow / blockInfo.byteSize) * blockInfo.width);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
++level) {
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
- gl.ActiveTexture(GL_TEXTURE0);
- gl.BindTexture(GetGLTarget(), GetHandle());
-
- Extent3D size = GetMipLevelPhysicalSize(level);
- switch (GetDimension()) {
- case wgpu::TextureDimension::e2D:
- if (GetArrayLayers() == 1) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, 0, Aspect::Color))) {
- // Skip lazy clears if already initialized.
- continue;
- }
- gl.TexSubImage2D(GetGLTarget(), static_cast<GLint>(level), 0, 0,
- size.width, size.height, GetGLFormat().format,
- GetGLFormat().type, 0);
- } else {
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer,
- Aspect::Color))) {
- // Skip lazy clears if already initialized.
- continue;
- }
- gl.TexSubImage3D(GetGLTarget(), static_cast<GLint>(level), 0, 0,
- static_cast<GLint>(layer), size.width, size.height,
- 1, GetGLFormat().format, GetGLFormat().type, 0);
- }
- }
- break;
+ TextureCopy textureCopy;
+ textureCopy.texture = this;
+ textureCopy.mipLevel = level;
+ textureCopy.origin = {};
+ textureCopy.aspect = Aspect::Color;
+
+ TextureDataLayout dataLayout;
+ dataLayout.offset = 0;
+ dataLayout.bytesPerRow = bytesPerRow;
+ dataLayout.rowsPerImage = largestMipSize.height;
+
+ Extent3D mipSize = GetMipLevelPhysicalSize(level);
+
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
- case wgpu::TextureDimension::e1D:
- case wgpu::TextureDimension::e3D:
- UNREACHABLE();
+ textureCopy.origin.z = layer;
+ DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, 0, dataLayout, mipSize);
}
}
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
-
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
if (clearValue == TextureBase::ClearValue::Zero) {
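The non-renderable clear path sizes one staging buffer off the largest mip in the range: bytes per row are aligned (to 4 here, matching GL's default unpack alignment), and the total size is computed in 64 bits and range-checked before being narrowed. A small arithmetic sketch under those assumptions, with a hypothetical Align helper:

    #include <cstdint>
    #include <limits>
    #include <optional>

    constexpr uint32_t Align(uint32_t value, uint32_t alignment) {
        return ((value + alignment - 1) / alignment) * alignment;
    }

    // Staging-buffer size for clearing `layers` layers of a widthBlocks x heightBlocks
    // image with bytesPerBlock-sized blocks, or nullopt if it does not fit in size_t.
    std::optional<size_t> ClearBufferSize(uint32_t widthBlocks, uint32_t heightBlocks,
                                          uint32_t layers, uint32_t bytesPerBlock) {
        uint32_t bytesPerRow = Align(widthBlocks * bytesPerBlock, 4);
        uint64_t size64 = uint64_t(bytesPerRow) * heightBlocks * layers;
        if (size64 > std::numeric_limits<size_t>::max()) {
            return std::nullopt;
        }
        return static_cast<size_t>(size64);
    }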
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
index c9bcdcc494f..7fc85428629 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
@@ -52,6 +52,12 @@ constexpr char kVulkanLibName[] = "libvulkan.so";
# error "Unimplemented Vulkan backend platform"
#endif
+// List of Vulkan MessageIdNames to suppress validation messages for. These should be used sparingly
+// but may be useful to temporarily quiet issues while a fix is in the works.
+constexpr const char* kSuppressedValidationMessageNames[] = {
+ "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout", // (ISSUE: dawn:785)
+};
+
namespace dawn_native { namespace vulkan {
namespace {
@@ -61,6 +67,13 @@ namespace dawn_native { namespace vulkan {
VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
void* /* pUserData */) {
+ // If the message is of a suppressed type, ignore it.
+ for (const char* msgName : kSuppressedValidationMessageNames) {
+ if (strstr(pCallbackData->pMessageIdName, msgName) != nullptr) {
+ return VK_FALSE;
+ }
+ }
+
dawn::WarningLog() << pCallbackData->pMessage;
ASSERT((messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) == 0);
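A self-contained sketch of the suppression pattern added to the debug-utils callback: the message ID name is compared against a deny-list before anything is logged (returning VK_FALSE tells the loader not to abort the call that triggered the message). The logging sink here is plain stderr rather than Dawn's WarningLog:

    #include <vulkan/vulkan.h>
    #include <cstdio>
    #include <cstring>

    static const char* const kSuppressedIds[] = {
        "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout",  // Example entry.
    };

    static VKAPI_ATTR VkBool32 VKAPI_CALL
    DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT /*severity*/,
                  VkDebugUtilsMessageTypeFlagsEXT /*types*/,
                  const VkDebugUtilsMessengerCallbackDataEXT* data,
                  void* /*userData*/) {
        for (const char* id : kSuppressedIds) {
            if (data->pMessageIdName != nullptr &&
                std::strstr(data->pMessageIdName, id) != nullptr) {
                return VK_FALSE;  // Suppressed: skip logging entirely.
            }
        }
        std::fprintf(stderr, "validation: %s\n", data->pMessage);
        return VK_FALSE;
    }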
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
index 700e850318a..78f7a7a58ed 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
@@ -89,13 +89,16 @@ namespace dawn_native { namespace vulkan {
ityp::vector<BindingIndex, VkDescriptorSetLayoutBinding> bindings;
bindings.reserve(GetBindingCount());
+ bool useBindingIndex = GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator);
+
for (const auto& it : GetBindingMap()) {
BindingNumber bindingNumber = it.first;
BindingIndex bindingIndex = it.second;
const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
VkDescriptorSetLayoutBinding vkBinding;
- vkBinding.binding = static_cast<uint32_t>(bindingNumber);
+ vkBinding.binding = useBindingIndex ? static_cast<uint32_t>(bindingIndex)
+ : static_cast<uint32_t>(bindingNumber);
vkBinding.descriptorType = VulkanDescriptorType(bindingInfo);
vkBinding.descriptorCount = 1;
vkBinding.stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
index 72f8b698d7a..cc502c94d92 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
@@ -43,6 +43,10 @@ namespace dawn_native { namespace vulkan {
// the pools are reused when no longer used. Minimizing the number of descriptor pool allocation
// is important because creating them can incur GPU memory allocation which is usually an
// expensive syscall.
+ //
+    // The Vulkan BindGroupLayout depends on whether UseTintGenerator is enabled.
+ // When UseTintGenerator is on, VkDescriptorSetLayoutBinding::binding is set to BindingIndex,
+ // otherwise it is set to BindingNumber.
class BindGroupLayout final : public BindGroupLayoutBase {
public:
static ResultOrError<Ref<BindGroupLayout>> Create(
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
index 07653e8bf59..b2334d10952 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
@@ -47,6 +47,8 @@ namespace dawn_native { namespace vulkan {
ityp::stack_vec<uint32_t, VkDescriptorImageInfo, kMaxOptimalBindingsPerGroup>
writeImageInfo(bindingCount);
+ bool useBindingIndex = device->IsToggleEnabled(Toggle::UseTintGenerator);
+
uint32_t numWrites = 0;
for (const auto& it : GetLayout()->GetBindingMap()) {
BindingNumber bindingNumber = it.first;
@@ -57,7 +59,8 @@ namespace dawn_native { namespace vulkan {
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write.pNext = nullptr;
write.dstSet = GetHandle();
- write.dstBinding = static_cast<uint32_t>(bindingNumber);
+ write.dstBinding = useBindingIndex ? static_cast<uint32_t>(bindingIndex)
+ : static_cast<uint32_t>(bindingNumber);
write.dstArrayElement = 0;
write.descriptorCount = 1;
write.descriptorType = VulkanDescriptorType(bindingInfo);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
index dac780bf0b9..14b6940eb91 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
@@ -26,6 +26,9 @@ namespace dawn_native { namespace vulkan {
class Device;
+    // The Vulkan BindGroup depends on whether UseTintGenerator is enabled.
+ // When UseTintGenerator is on, VkWriteDescriptorSet::dstBinding is set to BindingIndex,
+ // otherwise it is set to BindingNumber.
class BindGroup final : public BindGroupBase, public PlacementAllocated {
public:
static ResultOrError<Ref<BindGroup>> Create(Device* device,
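The two Vulkan changes above have to stay in lockstep: whatever slot the layout declares in VkDescriptorSetLayoutBinding::binding is the slot the bind group writes to via VkWriteDescriptorSet::dstBinding (and the slot the remapped SPIR-V expects). A sketch of the shared slot choice; useBindingIndex stands in for the UseTintGenerator toggle:

    #include <vulkan/vulkan.h>

    // Picks the descriptor slot consistently for both the set layout and the writes.
    uint32_t DescriptorSlot(bool useBindingIndex, uint32_t bindingIndex, uint32_t bindingNumber) {
        return useBindingIndex ? bindingIndex : bindingNumber;
    }

    VkDescriptorSetLayoutBinding MakeLayoutBinding(uint32_t slot, VkDescriptorType type,
                                                   VkShaderStageFlags stages) {
        VkDescriptorSetLayoutBinding binding = {};
        binding.binding = slot;  // The same value is later used as dstBinding.
        binding.descriptorType = type;
        binding.descriptorCount = 1;
        binding.stageFlags = stages;
        binding.pImmutableSamplers = nullptr;
        return binding;
    }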
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
index 47fac441a5f..bbdcf6c3b92 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
@@ -69,160 +69,126 @@ namespace dawn_native { namespace vulkan {
const Texture* dstTexture = ToBackend(dstCopy.texture.Get());
VkImageCopy region;
-
- // TODO(jiawei.shao@intel.com): support 1D and 3D textures
- ASSERT(srcTexture->GetDimension() == wgpu::TextureDimension::e2D &&
- dstTexture->GetDimension() == wgpu::TextureDimension::e2D);
region.srcSubresource.aspectMask = VulkanAspectMask(aspect);
region.srcSubresource.mipLevel = srcCopy.mipLevel;
- region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
- region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
+ region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
+ region.dstSubresource.mipLevel = dstCopy.mipLevel;
+
+ bool has3DTextureInCopy = false;
region.srcOffset.x = srcCopy.origin.x;
region.srcOffset.y = srcCopy.origin.y;
- region.srcOffset.z = 0;
-
- region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
- region.dstSubresource.mipLevel = dstCopy.mipLevel;
- region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
- region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
+ switch (srcTexture->GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
+ region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
+ region.srcOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e3D:
+ has3DTextureInCopy = true;
+ region.srcSubresource.baseArrayLayer = 0;
+ region.srcSubresource.layerCount = 1;
+ region.srcOffset.z = srcCopy.origin.z;
+ break;
+ case wgpu::TextureDimension::e1D:
+ // TODO(jiawei.shao@intel.com): support 1D textures
+ UNREACHABLE();
+ }
region.dstOffset.x = dstCopy.origin.x;
region.dstOffset.y = dstCopy.origin.y;
- region.dstOffset.z = 0;
+ switch (dstTexture->GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
+ region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
+ region.dstOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e3D:
+ has3DTextureInCopy = true;
+ region.dstSubresource.baseArrayLayer = 0;
+ region.dstSubresource.layerCount = 1;
+ region.dstOffset.z = dstCopy.origin.z;
+ break;
+ case wgpu::TextureDimension::e1D:
+ // TODO(jiawei.shao@intel.com): support 1D textures
+ UNREACHABLE();
+ }
ASSERT(HasSameTextureCopyExtent(srcCopy, dstCopy, copySize));
Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
region.extent.width = imageExtent.width;
region.extent.height = imageExtent.height;
- region.extent.depth = 1;
+ region.extent.depth = has3DTextureInCopy ? copySize.depthOrArrayLayers : 1;
return region;
}
- void ApplyDescriptorSets(
- Device* device,
- VkCommandBuffer commands,
- VkPipelineBindPoint bindPoint,
- VkPipelineLayout pipelineLayout,
- const BindGroupLayoutMask& bindGroupsToApply,
- const ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups>& bindGroups,
- const ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups>& dynamicOffsetCounts,
- const ityp::array<BindGroupIndex,
- std::array<uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
- kMaxBindGroups>& dynamicOffsets) {
- for (BindGroupIndex dirtyIndex : IterateBitSet(bindGroupsToApply)) {
- VkDescriptorSet set = ToBackend(bindGroups[dirtyIndex])->GetHandle();
- const uint32_t* dynamicOffset = dynamicOffsetCounts[dirtyIndex] > 0
- ? dynamicOffsets[dirtyIndex].data()
- : nullptr;
- device->fn.CmdBindDescriptorSets(commands, bindPoint, pipelineLayout,
- static_cast<uint32_t>(dirtyIndex), 1, &*set,
- dynamicOffsetCounts[dirtyIndex], dynamicOffset);
- }
- }
-
- class RenderDescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
+ class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
public:
- RenderDescriptorSetTracker() = default;
+ DescriptorSetTracker() = default;
void Apply(Device* device,
CommandRecordingContext* recordingContext,
VkPipelineBindPoint bindPoint) {
- ApplyDescriptorSets(device, recordingContext->commandBuffer, bindPoint,
- ToBackend(mPipelineLayout)->GetHandle(),
- mDirtyBindGroupsObjectChangedOrIsDynamic, mBindGroups,
- mDynamicOffsetCounts, mDynamicOffsets);
+ for (BindGroupIndex dirtyIndex :
+ IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ VkDescriptorSet set = ToBackend(mBindGroups[dirtyIndex])->GetHandle();
+ const uint32_t* dynamicOffset = mDynamicOffsetCounts[dirtyIndex] > 0
+ ? mDynamicOffsets[dirtyIndex].data()
+ : nullptr;
+ device->fn.CmdBindDescriptorSets(
+ recordingContext->commandBuffer, bindPoint,
+ ToBackend(mPipelineLayout)->GetHandle(), static_cast<uint32_t>(dirtyIndex),
+ 1, &*set, mDynamicOffsetCounts[dirtyIndex], dynamicOffset);
+ }
DidApply();
}
};
- class ComputeDescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
- public:
- ComputeDescriptorSetTracker() = default;
+ // Records the necessary barriers for a synchronization scope using the resource usage
+ // data pre-computed in the frontend. Also performs lazy initialization if required.
+ void TransitionAndClearForSyncScope(Device* device,
+ CommandRecordingContext* recordingContext,
+ const SyncScopeResourceUsage& scope) {
+ std::vector<VkBufferMemoryBarrier> bufferBarriers;
+ std::vector<VkImageMemoryBarrier> imageBarriers;
+ VkPipelineStageFlags srcStages = 0;
+ VkPipelineStageFlags dstStages = 0;
- void Apply(Device* device,
- CommandRecordingContext* recordingContext,
- VkPipelineBindPoint bindPoint) {
- ApplyDescriptorSets(device, recordingContext->commandBuffer, bindPoint,
- ToBackend(mPipelineLayout)->GetHandle(),
- mDirtyBindGroupsObjectChangedOrIsDynamic, mBindGroups,
- mDynamicOffsetCounts, mDynamicOffsets);
-
- std::vector<VkBufferMemoryBarrier> bufferBarriers;
- std::vector<VkImageMemoryBarrier> imageBarriers;
- VkPipelineStageFlags srcStages = 0;
- VkPipelineStageFlags dstStages = 0;
-
- for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
- BindGroupLayoutBase* layout = mBindGroups[index]->GetLayout();
- for (BindingIndex binding{0}; binding < layout->GetBindingCount(); ++binding) {
- const BindingInfo& bindingInfo = layout->GetBindingInfo(binding);
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- wgpu::BufferUsage usage;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- usage = wgpu::BufferUsage::Uniform;
- break;
- case wgpu::BufferBindingType::Storage:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- usage = wgpu::BufferUsage::Storage;
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- VkBufferMemoryBarrier bufferBarrier;
- if (ToBackend(mBindGroups[index]
- ->GetBindingAsBufferBinding(binding)
- .buffer)
- ->TransitionUsageAndGetResourceBarrier(
- usage, &bufferBarrier, &srcStages, &dstStages)) {
- bufferBarriers.push_back(bufferBarrier);
- }
- break;
- }
+ for (size_t i = 0; i < scope.buffers.size(); ++i) {
+ Buffer* buffer = ToBackend(scope.buffers[i]);
+ buffer->EnsureDataInitialized(recordingContext);
- case BindingInfoType::StorageTexture: {
- TextureViewBase* view =
- mBindGroups[index]->GetBindingAsTextureView(binding);
- ToBackend(view->GetTexture())
- ->TransitionUsageAndGetResourceBarrier(
- wgpu::TextureUsage::Storage, view->GetSubresourceRange(),
- &imageBarriers, &srcStages, &dstStages);
- break;
- }
+ VkBufferMemoryBarrier bufferBarrier;
+ if (buffer->TransitionUsageAndGetResourceBarrier(
+ scope.bufferUsages[i], &bufferBarrier, &srcStages, &dstStages)) {
+ bufferBarriers.push_back(bufferBarrier);
+ }
+ }
- case BindingInfoType::Texture: {
- TextureViewBase* view =
- mBindGroups[index]->GetBindingAsTextureView(binding);
- ToBackend(view->GetTexture())
- ->TransitionUsageAndGetResourceBarrier(
- wgpu::TextureUsage::Sampled, view->GetSubresourceRange(),
- &imageBarriers, &srcStages, &dstStages);
- break;
- }
+ for (size_t i = 0; i < scope.textures.size(); ++i) {
+ Texture* texture = ToBackend(scope.textures[i]);
- case BindingInfoType::Sampler:
- // Don't require barriers.
- break;
+ // Clear subresources that are not render attachments. Render attachments will be
+ // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+ // subresource has not been initialized before the render pass.
+ scope.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+ texture->EnsureSubresourceContentInitialized(recordingContext, range);
}
- }
- }
-
- if (!bufferBarriers.empty() || !imageBarriers.empty()) {
- ASSERT(srcStages != 0 && dstStages != 0);
- device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages,
- dstStages, 0, 0, nullptr, bufferBarriers.size(),
- bufferBarriers.data(), imageBarriers.size(),
- imageBarriers.data());
- }
+ });
+ texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i],
+ &imageBarriers, &srcStages, &dstStages);
+ }
- DidApply();
+ if (bufferBarriers.size() || imageBarriers.size()) {
+ device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages,
+ 0, 0, nullptr, bufferBarriers.size(),
+ bufferBarriers.data(), imageBarriers.size(),
+ imageBarriers.data());
}
- };
+ }
MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
Device* device,
@@ -531,45 +497,8 @@ namespace dawn_native { namespace vulkan {
// And resets the used query sets which are rewritten on the render pass.
auto PrepareResourcesForRenderPass = [](Device* device,
CommandRecordingContext* recordingContext,
- const PassResourceUsage& usages) {
- std::vector<VkBufferMemoryBarrier> bufferBarriers;
- std::vector<VkImageMemoryBarrier> imageBarriers;
- VkPipelineStageFlags srcStages = 0;
- VkPipelineStageFlags dstStages = 0;
-
- for (size_t i = 0; i < usages.buffers.size(); ++i) {
- Buffer* buffer = ToBackend(usages.buffers[i]);
- buffer->EnsureDataInitialized(recordingContext);
-
- VkBufferMemoryBarrier bufferBarrier;
- if (buffer->TransitionUsageAndGetResourceBarrier(
- usages.bufferUsages[i], &bufferBarrier, &srcStages, &dstStages)) {
- bufferBarriers.push_back(bufferBarrier);
- }
- }
-
- for (size_t i = 0; i < usages.textures.size(); ++i) {
- Texture* texture = ToBackend(usages.textures[i]);
-
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- usages.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(recordingContext, range);
- }
- });
- texture->TransitionUsageForPass(recordingContext, usages.textureUsages[i],
- &imageBarriers, &srcStages, &dstStages);
- }
-
- if (bufferBarriers.size() || imageBarriers.size()) {
- device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages,
- 0, 0, nullptr, bufferBarriers.size(),
- bufferBarriers.data(), imageBarriers.size(),
- imageBarriers.data());
- }
+ const RenderPassResourceUsage& usages) {
+ TransitionAndClearForSyncScope(device, recordingContext, usages);
// Reset all query set used on current render pass together before beginning render pass
// because the reset command must be called outside render pass
@@ -579,25 +508,8 @@ namespace dawn_native { namespace vulkan {
}
};
- // TODO(jiawei.shao@intel.com): move the resource lazy clearing inside the barrier tracking
- // for compute passes.
- auto PrepareResourcesForComputePass = [](Device* device,
- CommandRecordingContext* recordingContext,
- const PassResourceUsage& usages) {
- for (size_t i = 0; i < usages.buffers.size(); ++i) {
- Buffer* buffer = ToBackend(usages.buffers[i]);
- buffer->EnsureDataInitialized(recordingContext);
- }
-
- for (size_t i = 0; i < usages.textures.size(); ++i) {
- Texture* texture = ToBackend(usages.textures[i]);
- texture->EnsureSubresourceContentInitialized(recordingContext,
- texture->GetAllSubresources());
- }
- };
-
- const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
- size_t nextPassNumber = 0;
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
Command type;
while (mCommands.NextCommandId(&type)) {
@@ -637,7 +549,7 @@ namespace dawn_native { namespace vulkan {
ComputeBufferImageCopyRegion(src, dst, copy->copySize);
VkImageSubresourceLayers subresource = region.imageSubresource;
- ASSERT(dst.texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(dst.texture->GetDimension() != wgpu::TextureDimension::e1D);
SubresourceRange range =
GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
@@ -675,7 +587,7 @@ namespace dawn_native { namespace vulkan {
VkBufferImageCopy region =
ComputeBufferImageCopyRegion(dst, src, copy->copySize);
- ASSERT(src.texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(src.texture->GetDimension() != wgpu::TextureDimension::e1D);
SubresourceRange range =
GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
@@ -774,24 +686,25 @@ namespace dawn_native { namespace vulkan {
case Command::BeginRenderPass: {
BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- PrepareResourcesForRenderPass(device, recordingContext,
- passResourceUsages[nextPassNumber]);
+ PrepareResourcesForRenderPass(
+ device, recordingContext,
+ GetResourceUsages().renderPasses[nextRenderPassNumber]);
LazyClearRenderPassAttachments(cmd);
DAWN_TRY(RecordRenderPass(recordingContext, cmd));
- nextPassNumber++;
+ nextRenderPassNumber++;
break;
}
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
- PrepareResourcesForComputePass(device, recordingContext,
- passResourceUsages[nextPassNumber]);
- DAWN_TRY(RecordComputePass(recordingContext));
+ DAWN_TRY(RecordComputePass(
+ recordingContext,
+ GetResourceUsages().computePasses[nextComputePassNumber]));
- nextPassNumber++;
+ nextComputePassNumber++;
break;
}
@@ -800,13 +713,29 @@ namespace dawn_native { namespace vulkan {
QuerySet* querySet = ToBackend(cmd->querySet.Get());
Buffer* destination = ToBackend(cmd->destination.Get());
- // TODO(hao.x.li@intel.com): Clear the resolve region of the buffer to 0 if at
- // least one query is unavailable for the resolving and the resolve buffer has
- // been initialized or fully used.
+                        // Because VK_QUERY_RESULT_WAIT_BIT is set, vkCmdCopyQueryPoolResults can
+                        // only retrieve available queries. For the unavailable queries, we need
+                        // to clear the resolve region of the buffer to 0s if the buffer has been
+                        // initialized or fully used.
+ auto startIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery;
+ auto endIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery +
+ cmd->queryCount;
+ bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
+ if (hasUnavailableQueries &&
+ (destination->IsDataInitialized() ||
+ destination->IsFullBufferRange(cmd->destinationOffset,
+ cmd->queryCount * sizeof(uint64_t)))) {
+ destination->TransitionUsageNow(recordingContext,
+ wgpu::BufferUsage::CopyDst);
+ device->fn.CmdFillBuffer(commands, destination->GetHandle(),
+ cmd->destinationOffset,
+ cmd->queryCount * sizeof(uint64_t), 0u);
+ } else {
+ destination->EnsureDataInitializedAsDestination(
+ recordingContext, cmd->destinationOffset,
+ cmd->queryCount * sizeof(uint64_t));
+ }
- destination->EnsureDataInitializedAsDestination(
- recordingContext, cmd->destinationOffset,
- cmd->queryCount * sizeof(uint64_t));
destination->TransitionUsageNow(recordingContext,
wgpu::BufferUsage::QueryResolve);
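A condensed sketch of the resolve ordering the new code establishes: when some queries in the range may produce no data, the destination range is zero-filled first, then the pool results are copied on top. Synchronization between the two commands and the usage transitions Dawn performs are omitted here and would be required in real code:

    #include <vulkan/vulkan.h>

    void ResolveQueries(VkCommandBuffer cmd, VkQueryPool pool, uint32_t firstQuery,
                        uint32_t queryCount, VkBuffer dst, VkDeviceSize dstOffset,
                        bool mayHaveUnavailableQueries) {
        const VkDeviceSize resolveSize = VkDeviceSize(queryCount) * sizeof(uint64_t);
        if (mayHaveUnavailableQueries) {
            // Pre-clear so slots that receive no result read back as zero.
            vkCmdFillBuffer(cmd, dst, dstOffset, resolveSize, 0u);
        }
        vkCmdCopyQueryPoolResults(cmd, pool, firstQuery, queryCount, dst, dstOffset,
                                  sizeof(uint64_t), VK_QUERY_RESULT_64_BIT);
    }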
@@ -885,11 +814,13 @@ namespace dawn_native { namespace vulkan {
return {};
}
- MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext) {
+ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext,
+ const ComputePassResourceUsage& resourceUsages) {
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = recordingContext->commandBuffer;
- ComputeDescriptorSetTracker descriptorSets = {};
+ uint64_t currentDispatch = 0;
+ DescriptorSetTracker descriptorSets = {};
Command type;
while (mCommands.NextCommandId(&type)) {
@@ -902,21 +833,27 @@ namespace dawn_native { namespace vulkan {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+ TransitionAndClearForSyncScope(device, recordingContext,
+ resourceUsages.dispatchUsages[currentDispatch]);
descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+
device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
+ currentDispatch++;
break;
}
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- ToBackend(dispatch->indirectBuffer)
- ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::Indirect);
VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
+ TransitionAndClearForSyncScope(device, recordingContext,
+ resourceUsages.dispatchUsages[currentDispatch]);
descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+
device->fn.CmdDispatchIndirect(
commands, indirectBuffer,
static_cast<VkDeviceSize>(dispatch->indirectOffset));
+ currentDispatch++;
break;
}
@@ -1054,7 +991,7 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
}
- RenderDescriptorSetTracker descriptorSets = {};
+ DescriptorSetTracker descriptorSets = {};
RenderPipeline* lastPipeline = nullptr;
auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
@@ -1209,8 +1146,8 @@ namespace dawn_native { namespace vulkan {
return {};
}
- case Command::SetBlendColor: {
- SetBlendColorCmd* cmd = mCommands.NextCommand<SetBlendColorCmd>();
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
const std::array<float, 4> blendConstants = ConvertToFloatColor(cmd->color);
device->fn.CmdSetBlendConstants(commands, blendConstants.data());
break;
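TransitionAndClearForSyncScope batches every buffer and image transition for a pass (or, in compute passes, a single dispatch) into one vkCmdPipelineBarrier call. A stripped-down sketch of that batching, with pre-built barrier structs standing in for Dawn's usage tracking:

    #include <vulkan/vulkan.h>
    #include <vector>

    void FlushBarriers(VkCommandBuffer cmd, VkPipelineStageFlags srcStages,
                       VkPipelineStageFlags dstStages,
                       const std::vector<VkBufferMemoryBarrier>& bufferBarriers,
                       const std::vector<VkImageMemoryBarrier>& imageBarriers) {
        if (bufferBarriers.empty() && imageBarriers.empty()) {
            return;  // Nothing transitioned in this scope.
        }
        vkCmdPipelineBarrier(cmd, srcStages, dstStages, /*dependencyFlags=*/0,
                             /*memoryBarrierCount=*/0, nullptr,
                             static_cast<uint32_t>(bufferBarriers.size()), bufferBarriers.data(),
                             static_cast<uint32_t>(imageBarriers.size()), imageBarriers.data());
    }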
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
index edc35ff1280..d5d603b611f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
@@ -40,7 +40,8 @@ namespace dawn_native { namespace vulkan {
private:
CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- MaybeError RecordComputePass(CommandRecordingContext* recordingContext);
+ MaybeError RecordComputePass(CommandRecordingContext* recordingContext,
+ const ComputePassResourceUsage& resourceUsages);
MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
BeginRenderPassCmd* renderPass);
void RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
index a81dee9039e..322c0262206 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
@@ -45,7 +45,15 @@ namespace dawn_native { namespace vulkan {
createInfo.stage.pNext = nullptr;
createInfo.stage.flags = 0;
createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
- createInfo.stage.module = ToBackend(descriptor->computeStage.module)->GetHandle();
+ if (GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
+ // Generate a new VkShaderModule with BindingRemapper tint transform for each pipeline
+ DAWN_TRY_ASSIGN(createInfo.stage.module,
+ ToBackend(descriptor->computeStage.module)
+ ->GetTransformedModuleHandle(descriptor->computeStage.entryPoint,
+ ToBackend(GetLayout())));
+ } else {
+ createInfo.stage.module = ToBackend(descriptor->computeStage.module)->GetHandle();
+ }
createInfo.stage.pName = descriptor->computeStage.entryPoint;
createInfo.stage.pSpecializationInfo = nullptr;
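A sketch of what the compute-pipeline change boils down to: the only field that varies with the UseTintGenerator toggle is which VkShaderModule is plugged into the stage (a per-pipeline remapped module versus the module's cached handle). The helper below is illustrative, not Dawn code:

    #include <vulkan/vulkan.h>

    VkPipelineShaderStageCreateInfo MakeComputeStage(VkShaderModule module, const char* entryPoint) {
        VkPipelineShaderStageCreateInfo stage = {};
        stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
        stage.pNext = nullptr;
        stage.flags = 0;
        stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
        stage.module = module;  // Remapped per-pipeline module or the cached one.
        stage.pName = entryPoint;
        stage.pSpecializationInfo = nullptr;
        return stage;
    }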
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
index 530b5ca11d0..df3324fc62f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
@@ -650,7 +650,7 @@ namespace dawn_native { namespace vulkan {
VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
VkImageSubresourceLayers subresource = region.imageSubresource;
- ASSERT(dst->texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(dst->texture->GetDimension() != wgpu::TextureDimension::e1D);
SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
index ae6032c0da8..31b31bc8df1 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
@@ -57,7 +57,6 @@ namespace dawn_native { namespace vulkan {
uint32_t GetGraphicsQueueFamily() const;
VkQueue GetQueue() const;
- BufferUploader* GetBufferUploader() const;
FencedDeleter* GetFencedDeleter() const;
RenderPassCache* GetRenderPassCache() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
index bbb4f8eed49..52e9e0fcc13 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
@@ -158,17 +158,17 @@ namespace dawn_native { namespace vulkan {
return VK_BLEND_FACTOR_ZERO;
case wgpu::BlendFactor::One:
return VK_BLEND_FACTOR_ONE;
- case wgpu::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::Src:
return VK_BLEND_FACTOR_SRC_COLOR;
- case wgpu::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::OneMinusSrc:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
case wgpu::BlendFactor::SrcAlpha:
return VK_BLEND_FACTOR_SRC_ALPHA;
case wgpu::BlendFactor::OneMinusSrcAlpha:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
- case wgpu::BlendFactor::DstColor:
+ case wgpu::BlendFactor::Dst:
return VK_BLEND_FACTOR_DST_COLOR;
- case wgpu::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::OneMinusDst:
return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
case wgpu::BlendFactor::DstAlpha:
return VK_BLEND_FACTOR_DST_ALPHA;
@@ -176,10 +176,19 @@ namespace dawn_native { namespace vulkan {
return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
case wgpu::BlendFactor::SrcAlphaSaturated:
return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
- case wgpu::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::Constant:
return VK_BLEND_FACTOR_CONSTANT_COLOR;
- case wgpu::BlendFactor::OneMinusBlendColor:
+ case wgpu::BlendFactor::OneMinusConstant:
return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
+
+ // Deprecated blend factors should be normalized prior to this call.
+ case wgpu::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::DstColor:
+ case wgpu::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::OneMinusBlendColor:
+ UNREACHABLE();
}
}
@@ -332,12 +341,27 @@ namespace dawn_native { namespace vulkan {
VkPipelineShaderStageCreateInfo shaderStages[2];
{
+ if (device->IsToggleEnabled(Toggle::UseTintGenerator)) {
+ // Generate a new VkShaderModule with BindingRemapper tint transform for each
+ // pipeline
+ DAWN_TRY_ASSIGN(shaderStages[0].module,
+ ToBackend(descriptor->vertex.module)
+ ->GetTransformedModuleHandle(descriptor->vertex.entryPoint,
+ ToBackend(GetLayout())));
+ DAWN_TRY_ASSIGN(shaderStages[1].module,
+ ToBackend(descriptor->fragment->module)
+ ->GetTransformedModuleHandle(descriptor->fragment->entryPoint,
+ ToBackend(GetLayout())));
+ } else {
+ shaderStages[0].module = ToBackend(descriptor->vertex.module)->GetHandle();
+ shaderStages[1].module = ToBackend(descriptor->fragment->module)->GetHandle();
+ }
+
shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[0].pNext = nullptr;
shaderStages[0].flags = 0;
shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
shaderStages[0].pSpecializationInfo = nullptr;
- shaderStages[0].module = ToBackend(descriptor->vertex.module)->GetHandle();
shaderStages[0].pName = descriptor->vertex.entryPoint;
shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
@@ -345,7 +369,6 @@ namespace dawn_native { namespace vulkan {
shaderStages[1].flags = 0;
shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
shaderStages[1].pSpecializationInfo = nullptr;
- shaderStages[1].module = ToBackend(descriptor->fragment->module)->GetHandle();
shaderStages[1].pName = descriptor->fragment->entryPoint;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
index 0fb4c610b41..2e256da045a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
@@ -15,8 +15,10 @@
#include "dawn_native/vulkan/ShaderModuleVk.h"
#include "dawn_native/TintUtils.h"
+#include "dawn_native/vulkan/BindGroupLayoutVk.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
+#include "dawn_native/vulkan/PipelineLayoutVk.h"
#include "dawn_native/vulkan/VulkanError.h"
#include <spirv_cross.hpp>
@@ -53,14 +55,21 @@ namespace dawn_native { namespace vulkan {
errorStream << "Tint SPIR-V writer failure:" << std::endl;
tint::transform::Manager transformManager;
- transformManager.append(std::make_unique<tint::transform::BoundArrayAccessors>());
- transformManager.append(std::make_unique<tint::transform::EmitVertexPointSize>());
- transformManager.append(std::make_unique<tint::transform::Spirv>());
+ if (GetDevice()->IsRobustnessEnabled()) {
+ transformManager.Add<tint::transform::BoundArrayAccessors>();
+ }
+ transformManager.Add<tint::transform::Spirv>();
+
+ tint::transform::DataMap transformInputs;
+
+ tint::transform::Spirv::Config spirv_cfg;
+ spirv_cfg.emit_vertex_point_size = true;
+ transformInputs.Add<tint::transform::Spirv::Config>(spirv_cfg);
tint::Program program;
DAWN_TRY_ASSIGN(program,
RunTransforms(&transformManager, parseResult->tintProgram.get(),
- CompilationMessages()));
+ transformInputs, nullptr, GetCompilationMessages()));
tint::writer::spirv::Generator generator(&program);
if (!generator.Generate()) {
@@ -103,10 +112,102 @@ namespace dawn_native { namespace vulkan {
device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
mHandle = VK_NULL_HANDLE;
}
+
+ for (const auto& iter : mTransformedShaderModuleCache) {
+ device->GetFencedDeleter()->DeleteWhenUnused(iter.second);
+ }
}
VkShaderModule ShaderModule::GetHandle() const {
+ ASSERT(!GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator));
return mHandle;
}
+ ResultOrError<VkShaderModule> ShaderModule::GetTransformedModuleHandle(
+ const char* entryPointName,
+ PipelineLayout* layout) {
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+ ASSERT(GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator));
+
+ auto cacheKey = std::make_pair(layout, entryPointName);
+ auto iter = mTransformedShaderModuleCache.find(cacheKey);
+ if (iter != mTransformedShaderModuleCache.end()) {
+ auto cached = iter->second;
+ return cached;
+ }
+
+ // Creation of VkShaderModule is deferred to this point when using tint generator
+ std::ostringstream errorStream;
+ errorStream << "Tint SPIR-V writer failure:" << std::endl;
+
+ // Remap BindingNumber to BindingIndex in WGSL shader
+ using BindingRemapper = tint::transform::BindingRemapper;
+ using BindingPoint = tint::transform::BindingPoint;
+ BindingRemapper::BindingPoints bindingPoints;
+ BindingRemapper::AccessControls accessControls;
+
+ const EntryPointMetadata::BindingInfoArray& moduleBindingInfo =
+ GetEntryPoint(entryPointName).bindings;
+
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+ const auto& groupBindingInfo = moduleBindingInfo[group];
+ for (const auto& it : groupBindingInfo) {
+ BindingNumber binding = it.first;
+ BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(binding)};
+
+ BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(bindingIndex)};
+ if (srcBindingPoint != dstBindingPoint) {
+ bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+ }
+ }
+ }
+
+ tint::transform::Manager transformManager;
+ transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
+
+ tint::transform::DataMap transformInputs;
+ transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+ std::move(accessControls),
+ /* mayCollide */ false);
+
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+ nullptr, nullptr));
+
+ tint::writer::spirv::Generator generator(&program);
+ if (!generator.Generate()) {
+ errorStream << "Generator: " << generator.error() << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ std::vector<uint32_t> spirv = generator.result();
+
+            // Don't cache the transformed program; just create a VkShaderModule from the SPIR-V
+ VkShaderModuleCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ std::vector<uint32_t> vulkanSource;
+ createInfo.codeSize = spirv.size() * sizeof(uint32_t);
+ createInfo.pCode = spirv.data();
+
+ Device* device = ToBackend(GetDevice());
+
+ VkShaderModule newHandle = VK_NULL_HANDLE;
+
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateShaderModule(device->GetVkDevice(), &createInfo, nullptr, &*newHandle),
+ "CreateShaderModule"));
+ if (newHandle != VK_NULL_HANDLE) {
+ mTransformedShaderModuleCache.emplace(cacheKey, newHandle);
+ }
+
+ return newHandle;
+ }
+
}} // namespace dawn_native::vulkan
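Note (illustrative, not part of the patch): the GetTransformedModuleHandle() hunk above remaps each (group, binding) pair declared by the shader to the binding index chosen by the pipeline layout before the SPIR-V is generated. The standalone sketch below illustrates just that remapping step; BindingPoint and BuildRemappings are stand-ins for the tint/Dawn types, not the actual API.

#include <cstdint>
#include <map>
#include <tuple>
#include <vector>

// Illustrative stand-in for tint::transform::BindingPoint.
struct BindingPoint {
    uint32_t group;
    uint32_t binding;
    bool operator<(const BindingPoint& o) const {
        return std::tie(group, binding) < std::tie(o.group, o.binding);
    }
    bool operator!=(const BindingPoint& o) const {
        return group != o.group || binding != o.binding;
    }
};

// Builds the (group, binding) -> (group, bindingIndex) table that a BindingRemapper-style
// transform would consume. GetBindingIndex models BindGroupLayout::GetBindingIndex().
std::map<BindingPoint, BindingPoint> BuildRemappings(
    const std::vector<BindingPoint>& shaderBindings,
    uint32_t (*GetBindingIndex)(uint32_t group, uint32_t binding)) {
    std::map<BindingPoint, BindingPoint> remappings;
    for (const BindingPoint& src : shaderBindings) {
        BindingPoint dst{src.group, GetBindingIndex(src.group, src.binding)};
        if (src != dst) {
            remappings.emplace(src, dst);  // only record bindings that actually move
        }
    }
    return remappings;
}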
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
index 7c0d8ef841c..9dd7817d7ea 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
@@ -23,6 +23,11 @@
namespace dawn_native { namespace vulkan {
class Device;
+ class PipelineLayout;
+
+ using TransformedShaderModuleCache = std::unordered_map<PipelineLayoutEntryPointPair,
+ VkShaderModule,
+ PipelineLayoutEntryPointPairHashFunc>;
class ShaderModule final : public ShaderModuleBase {
public:
@@ -32,12 +37,19 @@ namespace dawn_native { namespace vulkan {
VkShaderModule GetHandle() const;
+ // This is only called when UseTintGenerator is on
+ ResultOrError<VkShaderModule> GetTransformedModuleHandle(const char* entryPointName,
+ PipelineLayout* layout);
+
private:
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
~ShaderModule() override;
MaybeError Initialize(ShaderModuleParseResult* parseResult);
VkShaderModule mHandle = VK_NULL_HANDLE;
+
+ // New handles created by GetTransformedModuleHandle at pipeline creation time
+ TransformedShaderModuleCache mTransformedShaderModuleCache;
};
}} // namespace dawn_native::vulkan
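Note (illustrative, not part of the patch): the TransformedShaderModuleCache above keys cached VkShaderModules by a (pipeline layout, entry point) pair with a dedicated hash functor. A minimal sketch of that caching pattern, with placeholder key and value types instead of the real Dawn ones:

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>

// Placeholder key: (pipeline layout identity, entry point name). The real cache uses
// PipelineLayoutEntryPointPair and stores VkShaderModule handles.
using ModuleCacheKey = std::pair<const void*, std::string>;

struct ModuleCacheKeyHash {
    size_t operator()(const ModuleCacheKey& key) const {
        size_t h1 = std::hash<const void*>()(key.first);
        size_t h2 = std::hash<std::string>()(key.second);
        return h1 ^ (h2 + 0x9e3779b9 + (h1 << 6) + (h1 >> 2));  // simple hash-combine
    }
};

// Look up a transformed module for this (layout, entry point), creating and caching it on a miss.
template <typename Module, typename CreateFn>
Module GetOrCreateModule(std::unordered_map<ModuleCacheKey, Module, ModuleCacheKeyHash>& cache,
                         const ModuleCacheKey& key,
                         CreateFn&& create) {
    auto iter = cache.find(key);
    if (iter != cache.end()) {
        return iter->second;  // reuse the module generated for this key
    }
    Module module = create();
    cache.emplace(key, module);
    return module;
}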
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
index 59e753fd839..6274f849c62 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
@@ -863,7 +863,7 @@ namespace dawn_native { namespace vulkan {
}
void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
- const PassTextureUsage& textureUsages,
+ const TextureSubresourceUsage& textureUsages,
std::vector<VkImageMemoryBarrier>* imageBarriers,
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages) {
@@ -986,9 +986,6 @@ namespace dawn_native { namespace vulkan {
ASSERT(imageBarriers != nullptr);
const Format& format = GetFormat();
- // This transitions assume it is a 2D texture
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
-
wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
mSubresourceLastUsages.Update(
range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
@@ -1030,9 +1027,13 @@ namespace dawn_native { namespace vulkan {
ASSERT(range.aspects == Aspect::Color);
const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;
- uint32_t bytesPerRow = Align((GetWidth() / blockInfo.width) * blockInfo.byteSize,
- device->GetOptimalBytesPerRowAlignment());
- uint64_t bufferSize = bytesPerRow * (GetHeight() / blockInfo.height);
+ Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+
+ uint32_t bytesPerRow =
+ Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
+ device->GetOptimalBytesPerRowAlignment());
+ uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
+ largestMipSize.depthOrArrayLayers;
DynamicUploader* uploader = device->GetDynamicUploader();
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle,
@@ -1043,6 +1044,7 @@ namespace dawn_native { namespace vulkan {
std::vector<VkBufferImageCopy> regions;
for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
++level) {
+ Extent3D copySize = GetMipLevelPhysicalSize(level);
imageRange.baseMipLevel = level;
for (uint32_t layer = range.baseArrayLayer;
layer < range.baseArrayLayer + range.layerCount; ++layer) {
@@ -1055,7 +1057,7 @@ namespace dawn_native { namespace vulkan {
TextureDataLayout dataLayout;
dataLayout.offset = uploadHandle.startOffset;
- dataLayout.rowsPerImage = GetHeight() / blockInfo.height;
+ dataLayout.rowsPerImage = copySize.height / blockInfo.height;
dataLayout.bytesPerRow = bytesPerRow;
TextureCopy textureCopy;
textureCopy.aspect = range.aspects;
@@ -1063,8 +1065,8 @@ namespace dawn_native { namespace vulkan {
textureCopy.origin = {0, 0, layer};
textureCopy.texture = this;
- regions.push_back(ComputeBufferImageCopyRegion(dataLayout, textureCopy,
- GetMipLevelPhysicalSize(level)));
+ regions.push_back(
+ ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
}
}
device->fn.CmdCopyBufferToImage(
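Note (illustrative, not part of the patch): the clearing-path hunk above sizes the staging buffer from the physical size of the largest mip in the range and multiplies by depthOrArrayLayers, instead of using the level-0 texture size. A small arithmetic sketch of that computation; the helper names and the example values are illustrative, not Dawn API:

#include <cstdint>

// Round `value` up to the next multiple of `alignment` (alignment must be a power of two).
constexpr uint64_t Align(uint64_t value, uint64_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

struct Extent3D {
    uint32_t width, height, depthOrArrayLayers;
};

// Size of a staging buffer able to hold one full mip level, mirroring the bytesPerRow /
// bufferSize computation in the clearing path above.
uint64_t StagingBufferSizeForMip(Extent3D mipSize,
                                 uint32_t blockWidth,      // texel block width (1 if uncompressed)
                                 uint32_t blockHeight,     // texel block height
                                 uint32_t blockByteSize,   // bytes per texel block
                                 uint64_t rowAlignment) {  // optimal bytes-per-row alignment
    uint64_t bytesPerRow = Align((mipSize.width / blockWidth) * blockByteSize, rowAlignment);
    return bytesPerRow * (mipSize.height / blockHeight) * mipSize.depthOrArrayLayers;
}

// Example: a 256x256 RGBA8 mip (blockByteSize = 4) with 6 array layers and 256-byte row
// alignment needs Align(256 * 4, 256) * 256 * 6 = 1024 * 256 * 6 = 1,572,864 bytes.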
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
index 013a3b6217a..908e468792f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
@@ -78,7 +78,7 @@ namespace dawn_native { namespace vulkan {
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages);
void TransitionUsageForPass(CommandRecordingContext* recordingContext,
- const PassTextureUsage& textureUsages,
+ const TextureSubresourceUsage& textureUsages,
std::vector<VkImageMemoryBarrier>* imageBarriers,
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
index 6167201df08..73753772a07 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
@@ -139,8 +139,22 @@ namespace dawn_native { namespace vulkan {
break;
}
+ case wgpu::TextureDimension::e3D: {
+ region.imageOffset.x = textureCopy.origin.x;
+ region.imageOffset.y = textureCopy.origin.y;
+ region.imageOffset.z = textureCopy.origin.z;
+
+ region.imageSubresource.baseArrayLayer = 0;
+ region.imageSubresource.layerCount = 1;
+
+ Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
+ region.imageExtent.width = imageExtent.width;
+ region.imageExtent.height = imageExtent.height;
+ region.imageExtent.depth = imageExtent.depthOrArrayLayers;
+ break;
+ }
+
case wgpu::TextureDimension::e1D:
- case wgpu::TextureDimension::e3D:
UNREACHABLE();
}
diff --git a/chromium/third_party/dawn/src/dawn_platform/WorkerThread.cpp b/chromium/third_party/dawn/src/dawn_platform/WorkerThread.cpp
index 64d09f153a1..7be4c3a63ee 100644
--- a/chromium/third_party/dawn/src/dawn_platform/WorkerThread.cpp
+++ b/chromium/third_party/dawn/src/dawn_platform/WorkerThread.cpp
@@ -25,9 +25,6 @@ namespace {
explicit AsyncWaitableEvent(std::function<void()> func) {
mFuture = std::async(std::launch::async, func);
}
- virtual ~AsyncWaitableEvent() override {
- ASSERT(IsComplete());
- }
void Wait() override {
ASSERT(mFuture.valid());
mFuture.wait();
@@ -38,6 +35,11 @@ namespace {
}
private:
+        // It is safe not to call Wait() in the destructor of AsyncWaitableEvent: since C++14,
+        // the destructor of a std::future created by a call to std::async blocks until its
+        // state becomes std::future_status::ready, provided the future holds the last
+        // reference to the shared state.
+        // See https://en.cppreference.com/w/cpp/thread/future/~future for more details.
std::future<void> mFuture;
};
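Note (illustrative, not part of the patch): the WorkerThread.cpp change relies on the C++ guarantee that destroying the std::future returned by std::async blocks until the task completes, which is why the explicit Wait() in the destructor could be dropped. A minimal standalone illustration (not Dawn code):

#include <chrono>
#include <future>
#include <iostream>
#include <thread>

int main() {
    {
        // The future returned by std::async owns the last reference to the shared state,
        // so its destructor at the end of this scope waits for the task to complete.
        std::future<void> future = std::async(std::launch::async, [] {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
            std::cout << "task finished\n";
        });
    }  // blocks here until "task finished" has been printed
    std::cout << "future destroyed\n";
    return 0;
}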
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp b/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
index 481ced43775..aa8301379a6 100644
--- a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
@@ -33,6 +33,10 @@ namespace dawn_wire {
return mImpl->ReserveTexture(device);
}
+ ReservedSwapChain WireClient::ReserveSwapChain(WGPUDevice device) {
+ return mImpl->ReserveSwapChain(device);
+ }
+
ReservedDevice WireClient::ReserveDevice() {
return mImpl->ReserveDevice();
}
@@ -41,6 +45,10 @@ namespace dawn_wire {
mImpl->ReclaimTextureReservation(reservation);
}
+ void WireClient::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+ mImpl->ReclaimSwapChainReservation(reservation);
+ }
+
void WireClient::ReclaimDeviceReservation(const ReservedDevice& reservation) {
mImpl->ReclaimDeviceReservation(reservation);
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp b/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
index a3599f4649f..bb3d7ba123e 100644
--- a/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
@@ -39,6 +39,14 @@ namespace dawn_wire {
return mImpl->InjectTexture(texture, id, generation, deviceId, deviceGeneration);
}
+ bool WireServer::InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ return mImpl->InjectSwapChain(swapchain, id, generation, deviceId, deviceGeneration);
+ }
+
bool WireServer::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
return mImpl->InjectDevice(device, id, generation);
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
index 3c7519c6ed8..6c9c70e7f6e 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
@@ -119,6 +119,8 @@ namespace dawn_wire { namespace client {
}
}
mRequests.clear();
+
+ FreeMappedData(true);
}
void Buffer::CancelCallbacksForDisconnect() {
@@ -360,15 +362,9 @@ namespace dawn_wire { namespace client {
mWriteHandle->SerializeFlush(writeHandleBuffer);
return WireResult::Success;
});
- mWriteHandle = nullptr;
-
- } else if (mReadHandle) {
- mReadHandle = nullptr;
}
- mMappedData = nullptr;
- mMapOffset = 0;
- mMapSize = 0;
+ FreeMappedData(false);
// Tag all mapping requests still in flight as unmapped before callback.
for (auto& it : mRequests) {
@@ -384,9 +380,7 @@ namespace dawn_wire { namespace client {
void Buffer::Destroy() {
// Remove the current mapping.
- mWriteHandle = nullptr;
- mReadHandle = nullptr;
- mMappedData = nullptr;
+ FreeMappedData(true);
// Tag all mapping requests still in flight as destroyed before callback.
for (auto& it : mRequests) {
@@ -420,4 +414,22 @@ namespace dawn_wire { namespace client {
size_t offsetInMappedRange = offset - mMapOffset;
return offsetInMappedRange <= mMapSize - size;
}
+
+ void Buffer::FreeMappedData(bool destruction) {
+#if defined(DAWN_ENABLE_ASSERTS)
+        // When in "debug" mode, 0xCA-out the mapped data when we free it so that we can detect
+        // use-after-free of the mapped data. This is particularly useful for WebGPU tests about
+        // the interaction of mapping and GC.
+ if (mMappedData && destruction) {
+ memset(mMappedData, 0xCA, mMapSize);
+ }
+#endif // defined(DAWN_ENABLE_ASSERTS)
+
+ mMapOffset = 0;
+ mMapSize = 0;
+ mWriteHandle = nullptr;
+ mReadHandle = nullptr;
+ mMappedData = nullptr;
+ }
+
}} // namespace dawn_wire::client
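Note (illustrative, not part of the patch): the FreeMappedData() helper above poisons released mapped memory with 0xCA in assert-enabled builds so stale reads are easy to recognize. A small sketch of that pattern under an illustrative macro name (EXAMPLE_ENABLE_ASSERTS is not a Dawn define):

#include <cstddef>
#include <cstring>

// Fill a released mapped range with a recognizable byte so use-after-free reads stand out
// as 0xCACACACA... in a debugger or in test output. Debug-only; compiled out otherwise.
void PoisonReleasedRange(void* mappedData, size_t mapSize) {
#if defined(EXAMPLE_ENABLE_ASSERTS)
    if (mappedData != nullptr && mapSize != 0) {
        std::memset(mappedData, 0xCA, mapSize);
    }
#else
    (void)mappedData;
    (void)mapSize;
#endif
}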
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
index 5e0d5ec2a34..50b9639f3fe 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
@@ -57,6 +57,8 @@ namespace dawn_wire { namespace client {
bool IsMappedForWriting() const;
bool CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const;
+ void FreeMappedData(bool destruction);
+
Device* mDevice;
// We want to defer all the validation to the server, which means we could have multiple
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
index 3b29f9e7eed..a00bb5e90f7 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
@@ -96,6 +96,18 @@ namespace dawn_wire { namespace client {
return result;
}
+ ReservedSwapChain Client::ReserveSwapChain(WGPUDevice device) {
+ auto* allocation = SwapChainAllocator().New(this);
+
+ ReservedSwapChain result;
+ result.swapchain = ToAPI(allocation->object.get());
+ result.id = allocation->object->id;
+ result.generation = allocation->generation;
+ result.deviceId = FromAPI(device)->id;
+ result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
+ return result;
+ }
+
ReservedDevice Client::ReserveDevice() {
auto* allocation = DeviceAllocator().New(this);
@@ -110,6 +122,10 @@ namespace dawn_wire { namespace client {
TextureAllocator().Free(FromAPI(reservation.texture));
}
+ void Client::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+ SwapChainAllocator().Free(FromAPI(reservation.swapchain));
+ }
+
void Client::ReclaimDeviceReservation(const ReservedDevice& reservation) {
DeviceAllocator().Free(FromAPI(reservation.device));
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.h b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
index db9ed431192..3616e372155 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
@@ -44,9 +44,11 @@ namespace dawn_wire { namespace client {
}
ReservedTexture ReserveTexture(WGPUDevice device);
+ ReservedSwapChain ReserveSwapChain(WGPUDevice device);
ReservedDevice ReserveDevice();
void ReclaimTextureReservation(const ReservedTexture& reservation);
+ void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
void ReclaimDeviceReservation(const ReservedDevice& reservation);
template <typename Cmd>
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
index ea0d8fcfc55..24fbedabd2a 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
@@ -72,6 +72,38 @@ namespace dawn_wire { namespace server {
return true;
}
+ bool Server::InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ ASSERT(swapchain != nullptr);
+ ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr || device->generation != deviceGeneration) {
+ return false;
+ }
+
+ ObjectData<WGPUSwapChain>* data = SwapChainObjects().Allocate(id);
+ if (data == nullptr) {
+ return false;
+ }
+
+ data->handle = swapchain;
+ data->generation = generation;
+ data->state = AllocationState::Allocated;
+ data->deviceInfo = device->info.get();
+
+ if (!TrackDeviceChild(data->deviceInfo, ObjectType::SwapChain, id)) {
+ return false;
+ }
+
+        // The swapchain is externally owned so it shouldn't be destroyed when we receive a
+        // destroy message from the client. Add a reference to counterbalance the eventual release.
+ mProcs.swapChainReference(swapchain);
+
+ return true;
+ }
+
bool Server::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
ASSERT(device != nullptr);
ObjectData<WGPUDevice>* data = DeviceObjects().Allocate(id);
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.h b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
index 5baea199ea9..1979d87d233 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
@@ -180,6 +180,12 @@ namespace dawn_wire { namespace server {
uint32_t deviceId,
uint32_t deviceGeneration);
+ bool InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration);
+
bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
WGPUDevice GetDevice(uint32_t id, uint32_t generation);
diff --git a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
index b85a988946b..e545ee1949a 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
@@ -56,6 +56,8 @@ namespace dawn_native { namespace d3d12 {
: ExternalImageAccessDescriptor {
public:
uint64_t acquireMutexKey;
+        // If left at the sentinel value UINT64_MAX, the release key defaults to acquireMutexKey + 1.
+ uint64_t releaseMutexKey = UINT64_MAX;
bool isSwapChainTexture = false;
};
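Note (illustrative, not part of the patch): the new releaseMutexKey field uses UINT64_MAX as a sentinel meaning "acquire key plus one". A sketch of how such a default could be resolved; the function name is illustrative and not part of the Dawn D3D12 backend API:

#include <cstdint>

constexpr uint64_t kSentinelReleaseKey = UINT64_MAX;

// Resolve the effective release key: the sentinel value falls back to acquireMutexKey + 1.
uint64_t ResolveReleaseMutexKey(uint64_t acquireMutexKey, uint64_t releaseMutexKey) {
    return releaseMutexKey == kSentinelReleaseKey ? acquireMutexKey + 1 : releaseMutexKey;
}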
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
index 097fd1a5990..87f2bab0d7d 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
@@ -38,6 +38,14 @@ namespace dawn_wire {
uint32_t deviceGeneration;
};
+ struct ReservedSwapChain {
+ WGPUSwapChain swapchain;
+ uint32_t id;
+ uint32_t generation;
+ uint32_t deviceId;
+ uint32_t deviceGeneration;
+ };
+
struct ReservedDevice {
WGPUDevice device;
uint32_t id;
@@ -58,9 +66,11 @@ namespace dawn_wire {
size_t size) override final;
ReservedTexture ReserveTexture(WGPUDevice device);
+ ReservedSwapChain ReserveSwapChain(WGPUDevice device);
ReservedDevice ReserveDevice();
void ReclaimTextureReservation(const ReservedTexture& reservation);
+ void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
void ReclaimDeviceReservation(const ReservedDevice& reservation);
// Disconnects the client.
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
index 59df6a49477..14d2354580b 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
@@ -42,12 +42,16 @@ namespace dawn_wire {
const volatile char* HandleCommands(const volatile char* commands,
size_t size) override final;
- // TODO(enga): Remove defaults after updating Chrome.
bool InjectTexture(WGPUTexture texture,
uint32_t id,
uint32_t generation,
- uint32_t deviceId = 1,
- uint32_t deviceGeneration = 0);
+ uint32_t deviceId,
+ uint32_t deviceGeneration);
+ bool InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration);
bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
diff --git a/chromium/third_party/dawn/src/tests/BUILD.gn b/chromium/third_party/dawn/src/tests/BUILD.gn
index 18d2fb536eb..f90325d28d4 100644
--- a/chromium/third_party/dawn/src/tests/BUILD.gn
+++ b/chromium/third_party/dawn/src/tests/BUILD.gn
@@ -151,9 +151,12 @@ test("dawn_unittests") {
"${dawn_root}/src/dawn_wire/server/ServerMemoryTransferService_mock.cpp",
"${dawn_root}/src/dawn_wire/server/ServerMemoryTransferService_mock.h",
"MockCallback.h",
+ "ToggleParser.cpp",
+ "ToggleParser.h",
"unittests/BitSetIteratorTests.cpp",
"unittests/BuddyAllocatorTests.cpp",
"unittests/BuddyMemoryAllocatorTests.cpp",
+ "unittests/ChainUtilsTests.cpp",
"unittests/CommandAllocatorTests.cpp",
"unittests/EnumClassBitmasksTests.cpp",
"unittests/EnumMaskIteratorTests.cpp",
@@ -188,6 +191,7 @@ test("dawn_unittests") {
"unittests/validation/ComputeIndirectValidationTests.cpp",
"unittests/validation/ComputeValidationTests.cpp",
"unittests/validation/CopyCommandsValidationTests.cpp",
+ "unittests/validation/CopyTextureForBrowserTests.cpp",
"unittests/validation/DebugMarkerValidationTests.cpp",
"unittests/validation/DrawIndirectValidationTests.cpp",
"unittests/validation/DynamicStateCommandValidationTests.cpp",
@@ -230,6 +234,7 @@ test("dawn_unittests") {
"unittests/wire/WireExtensionTests.cpp",
"unittests/wire/WireFenceTests.cpp",
"unittests/wire/WireInjectDeviceTests.cpp",
+ "unittests/wire/WireInjectSwapChainTests.cpp",
"unittests/wire/WireInjectTextureTests.cpp",
"unittests/wire/WireMemoryTransferServiceTests.cpp",
"unittests/wire/WireOptionalTests.cpp",
@@ -279,6 +284,9 @@ source_set("dawn_end2end_tests_sources") {
sources = [
"DawnTest.h",
"MockCallback.h",
+ "ParamGenerator.h",
+ "ToggleParser.cpp",
+ "ToggleParser.h",
"end2end/BasicTests.cpp",
"end2end/BindGroupTests.cpp",
"end2end/BufferTests.cpp",
@@ -403,7 +411,11 @@ source_set("dawn_white_box_tests_sources") {
"${dawn_root}/src/utils:dawn_utils",
]
- sources = [ "DawnTest.h" ]
+ sources = [
+ "DawnTest.h",
+ "ParamGenerator.h",
+ "ToggleParser.h",
+ ]
if (dawn_enable_vulkan) {
deps += [ "${dawn_root}/third_party/khronos:vulkan_headers" ]
@@ -504,12 +516,15 @@ test("dawn_perf_tests") {
"DawnTest.cpp",
"DawnTest.h",
"ParamGenerator.h",
+ "ToggleParser.cpp",
+ "ToggleParser.h",
"perf_tests/BufferUploadPerf.cpp",
"perf_tests/DawnPerfTest.cpp",
"perf_tests/DawnPerfTest.h",
"perf_tests/DawnPerfTestPlatform.cpp",
"perf_tests/DawnPerfTestPlatform.h",
"perf_tests/DrawCallPerf.cpp",
+ "perf_tests/ShaderRobustnessPerf.cpp",
"perf_tests/SubresourceTrackingPerf.cpp",
]
diff --git a/chromium/third_party/dawn/src/utils/BUILD.gn b/chromium/third_party/dawn/src/utils/BUILD.gn
index 64431bfd378..1bd40735b9b 100644
--- a/chromium/third_party/dawn/src/utils/BUILD.gn
+++ b/chromium/third_party/dawn/src/utils/BUILD.gn
@@ -95,7 +95,7 @@ static_library("dawn_utils") {
libs = []
frameworks = []
- if (is_win) {
+ if (is_win && !dawn_is_winuwp) {
sources += [ "WindowsDebugLogger.cpp" ]
} else {
sources += [ "EmptyDebugLogger.cpp" ]
diff --git a/chromium/third_party/dawn/src/utils/BackendBinding.cpp b/chromium/third_party/dawn/src/utils/BackendBinding.cpp
index f0b79ee20fe..15562de02e3 100644
--- a/chromium/third_party/dawn/src/utils/BackendBinding.cpp
+++ b/chromium/third_party/dawn/src/utils/BackendBinding.cpp
@@ -86,8 +86,12 @@ namespace utils {
return CreateNullBinding(window, device);
#endif
-#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
case wgpu::BackendType::OpenGL:
+ return CreateOpenGLBinding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
case wgpu::BackendType::OpenGLES:
return CreateOpenGLBinding(window, device);
#endif
diff --git a/chromium/third_party/dawn/src/utils/TestUtils.cpp b/chromium/third_party/dawn/src/utils/TestUtils.cpp
index 673ad78732a..acae87e3d6f 100644
--- a/chromium/third_party/dawn/src/utils/TestUtils.cpp
+++ b/chromium/third_party/dawn/src/utils/TestUtils.cpp
@@ -39,8 +39,8 @@ namespace utils {
TextureDataCopyLayout layout;
- layout.mipSize = {textureSizeAtLevel0.width >> mipmapLevel,
- textureSizeAtLevel0.height >> mipmapLevel,
+ layout.mipSize = {std::max(textureSizeAtLevel0.width >> mipmapLevel, 1u),
+ std::max(textureSizeAtLevel0.height >> mipmapLevel, 1u),
textureSizeAtLevel0.depthOrArrayLayers};
layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
index d8f7f3fd53f..c560c391130 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
@@ -104,14 +104,14 @@ namespace utils {
uint32_t colorAttachmentIndex = 0;
for (const wgpu::TextureView& colorAttachment : colorAttachmentInfo) {
if (colorAttachment.Get() != nullptr) {
- cColorAttachments[colorAttachmentIndex].attachment = colorAttachment;
+ cColorAttachments[colorAttachmentIndex].view = colorAttachment;
}
++colorAttachmentIndex;
}
colorAttachments = cColorAttachments.data();
if (depthStencil.Get() != nullptr) {
- cDepthStencilAttachmentInfo.attachment = depthStencil;
+ cDepthStencilAttachmentInfo.view = depthStencil;
depthStencilAttachment = &cDepthStencilAttachmentInfo;
} else {
depthStencilAttachment = nullptr;
@@ -226,6 +226,14 @@ namespace utils {
return device.CreatePipelineLayout(&descriptor);
}
+ wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+ std::vector<wgpu::BindGroupLayout> bgls) {
+ wgpu::PipelineLayoutDescriptor descriptor;
+ descriptor.bindGroupLayoutCount = uint32_t(bgls.size());
+ descriptor.bindGroupLayouts = bgls.data();
+ return device.CreatePipelineLayout(&descriptor);
+ }
+
wgpu::BindGroupLayout MakeBindGroupLayout(
const wgpu::Device& device,
std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer) {
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
index 5230ebff782..26c01fea0f5 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.h
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
@@ -28,8 +28,6 @@ namespace utils {
enum Expectation { Success, Failure };
- enum class SingleShaderStage { Vertex, Fragment, Compute };
-
wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source);
@@ -96,6 +94,9 @@ namespace utils {
wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
const wgpu::BindGroupLayout* bindGroupLayout);
+ wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+ std::vector<wgpu::BindGroupLayout> bgls);
+
// Helpers to make creating bind group layouts look nicer:
//
// utils::MakeBindGroupLayout(device, {