author Allan Sandfeld Jensen <allan.jensen@qt.io> 2020-10-12 14:27:29 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io> 2020-10-13 09:35:20 +0000
commit c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/third_party/dawn/src
parent 7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download qtwebengine-chromium-85-based.tar.gz
BASELINE: Update Chromium to 85.0.4183.140
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/dawn/src')
-rw-r--r-- chromium/third_party/dawn/src/common/BUILD.gn | 6
-rw-r--r-- chromium/third_party/dawn/src/common/BitSetIterator.h | 6
-rw-r--r-- chromium/third_party/dawn/src/common/CMakeLists.txt | 5
-rw-r--r-- chromium/third_party/dawn/src/common/Compiler.h | 6
-rw-r--r-- chromium/third_party/dawn/src/common/HashUtils.h | 18
-rw-r--r-- chromium/third_party/dawn/src/common/SystemUtils.cpp | 26
-rw-r--r-- chromium/third_party/dawn/src/common/SystemUtils.h | 17
-rw-r--r-- chromium/third_party/dawn/src/common/TypedInteger.h | 212
-rw-r--r-- chromium/third_party/dawn/src/common/UnderlyingType.h | 51
-rw-r--r-- chromium/third_party/dawn/src/common/ityp_array.h | 96
-rw-r--r-- chromium/third_party/dawn/src/common/ityp_bitset.h | 134
-rw-r--r-- chromium/third_party/dawn/src/common/ityp_span.h | 103
-rw-r--r-- chromium/third_party/dawn/src/common/windows_with_undefs.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/BUILD.gn | 12
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/BindGroup.cpp | 51
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/BindGroup.h | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h | 20
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp | 253
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h | 23
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h | 18
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/BindingInfo.h | 20
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp | 5
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Buffer.cpp | 141
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Buffer.h | 13
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/CMakeLists.txt | 12
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp | 34
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp | 48
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h | 9
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp | 186
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp | 7
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/CommandValidation.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Commands.h | 3
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp | 12
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/DawnNative.cpp | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Device.cpp | 119
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Device.h | 27
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Error.cpp | 8
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Error.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.cpp | 1
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Extensions.cpp | 15
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Extensions.h | 5
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp | 1
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Forward.h | 1
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/MapRequestTracker.cpp | 46
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/MapRequestTracker.h | 44
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h | 20
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp | 17
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Pipeline.cpp | 22
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Pipeline.h | 7
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp | 97
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/PipelineLayout.h | 21
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp | 28
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/QuerySet.cpp | 153
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/QuerySet.h | 61
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Queue.cpp | 75
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Queue.h | 21
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp | 26
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp | 279
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/ShaderModule.h | 23
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/SwapChain.cpp | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/SwapChain.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Texture.cpp | 121
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/Texture.h | 37
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/ToBackend.h | 5
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp | 86
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp | 24
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp | 61
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h | 21
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp | 27
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h | 9
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp | 131
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h | 29
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp | 330
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp | 34
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp | 24
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h | 18
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp | 117
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h | 16
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp | 65
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h | 54
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp | 76
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.h | 80
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp | 69
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h | 16
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp | 67
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp | 150
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h | 18
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp | 10
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp | 167
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h | 108
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp | 162
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h | 11
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp | 56
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h | 17
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp | 6
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp | 6
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp | 444
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h | 60
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp | 21
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h | 12
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm | 7
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h | 24
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm | 92
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm | 195
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h | 7
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm | 47
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/Forward.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h | 6
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm | 11
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h | 11
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm | 128
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp | 50
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h | 26
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp | 43
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp | 35
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h | 3
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp | 173
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp | 14
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h | 3
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/Forward.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp | 48
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h | 5
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp | 11
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h | 6
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.cpp | 32
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.h | 36
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp | 12
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp | 69
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h | 6
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp | 101
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h | 11
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp | 17
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp | 102
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp | 87
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h | 27
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp | 150
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp | 14
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp | 199
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h | 11
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp | 24
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp | 1
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp | 5
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h | 2
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp | 371
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h | 33
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp | 6
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h | 65
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp | 3
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.cpp | 320
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.h | 141
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp | 30
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp | 238
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h | 59
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp | 8
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp | 8
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp | 8
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp | 8
-rw-r--r-- chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp | 8
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp | 142
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/client/Client.cpp | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/client/Client.h | 11
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp | 10
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/client/Device.cpp | 18
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/client/ObjectAllocator.h | 1
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/server/Server.cpp | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/server/Server.h | 10
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp | 12
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp | 12
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp | 4
-rw-r--r-- chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp | 17
-rw-r--r-- chromium/third_party/dawn/src/fuzzers/BUILD.gn | 83
-rw-r--r-- chromium/third_party/dawn/src/include/dawn/dawn_wsi.h | 8
-rw-r--r-- chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h | 5
-rw-r--r-- chromium/third_party/dawn/src/tests/BUILD.gn | 12
-rw-r--r-- chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp | 168
-rw-r--r-- chromium/third_party/dawn/src/utils/TextureFormatUtils.h | 3
-rw-r--r-- chromium/third_party/dawn/src/utils/WGPUHelpers.cpp | 53
-rw-r--r-- chromium/third_party/dawn/src/utils/WGPUHelpers.h | 23
197 files changed, 6737 insertions(+), 2521 deletions(-)
diff --git a/chromium/third_party/dawn/src/common/BUILD.gn b/chromium/third_party/dawn/src/common/BUILD.gn
index 3f5a09b3782..59a23057224 100644
--- a/chromium/third_party/dawn/src/common/BUILD.gn
+++ b/chromium/third_party/dawn/src/common/BUILD.gn
@@ -96,9 +96,11 @@ config("dawn_internal") {
"-Wc++11-narrowing",
"-Wdeprecated-copy",
"-Wextra-semi-stmt",
+ "-Wimplicit-fallthrough",
"-Winconsistent-missing-destructor-override",
"-Winvalid-offsetof",
"-Wmissing-field-initializers",
+ "-Wnon-c-typedef-for-linkage",
"-Wpessimizing-move",
"-Wreturn-std-move-in-c++11",
"-Wshadow-field",
@@ -166,6 +168,10 @@ if (is_win || is_linux || is_mac || is_fuchsia || is_android) {
"SwapChainUtils.h",
"SystemUtils.cpp",
"SystemUtils.h",
+ "TypedInteger.h",
+ "ityp_array.h",
+ "ityp_bitset.h",
+ "ityp_span.h",
"vulkan_platform.h",
"windows_with_undefs.h",
"xlib_with_undefs.h",
diff --git a/chromium/third_party/dawn/src/common/BitSetIterator.h b/chromium/third_party/dawn/src/common/BitSetIterator.h
index 1a7fd606962..d35bc8a2df0 100644
--- a/chromium/third_party/dawn/src/common/BitSetIterator.h
+++ b/chromium/third_party/dawn/src/common/BitSetIterator.h
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "common/Math.h"
+#include "common/UnderlyingType.h"
#include <bitset>
#include <limits>
@@ -44,8 +45,11 @@ class BitSetIterator final {
bool operator==(const Iterator& other) const;
bool operator!=(const Iterator& other) const;
+
T operator*() const {
- return static_cast<T>(mCurrentBit);
+ using U = UnderlyingType<T>;
+ ASSERT(mCurrentBit <= std::numeric_limits<U>::max());
+ return static_cast<T>(static_cast<U>(mCurrentBit));
}
private:
diff --git a/chromium/third_party/dawn/src/common/CMakeLists.txt b/chromium/third_party/dawn/src/common/CMakeLists.txt
index 2e909b33b4e..1ab20234b1f 100644
--- a/chromium/third_party/dawn/src/common/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/common/CMakeLists.txt
@@ -44,6 +44,11 @@ target_sources(dawn_common PRIVATE
"SwapChainUtils.h"
"SystemUtils.cpp"
"SystemUtils.h"
+ "TypedInteger.h"
+ "UnderlyingType.h"
+ "ityp_array.h"
+ "ityp_bitset.h"
+ "ityp_span.h"
"vulkan_platform.h"
"windows_with_undefs.h"
"xlib_with_undefs.h"
diff --git a/chromium/third_party/dawn/src/common/Compiler.h b/chromium/third_party/dawn/src/common/Compiler.h
index 8e425c90588..49e6db4c5e1 100644
--- a/chromium/third_party/dawn/src/common/Compiler.h
+++ b/chromium/third_party/dawn/src/common/Compiler.h
@@ -107,4 +107,10 @@ extern void __cdecl __debugbreak(void);
# define DAWN_FORCE_INLINE inline
#endif
+#if defined(__clang__)
+# define DAWN_FALLTHROUGH [[clang::fallthrough]]
+#else
+# define DAWN_FALLTHROUGH
+#endif
+
#endif // COMMON_COMPILER_H_
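The macro pairs with the -Wimplicit-fallthrough warning enabled in the BUILD.gn hunk above: under clang an annotated fallthrough compiles cleanly, and on other compilers the macro expands to nothing. A minimal sketch of the intended use (the switch below is hypothetical, not part of this patch):

    #include <cstdint>
    #include "common/Compiler.h"

    uint32_t BytesPerElement(uint32_t format) {
        switch (format) {
            case 0:                // hypothetical format with the same element size as case 1
                DAWN_FALLTHROUGH;  // deliberate fallthrough; silences -Wimplicit-fallthrough
            case 1:
                return 4;
            default:
                return 8;
        }
    }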
diff --git a/chromium/third_party/dawn/src/common/HashUtils.h b/chromium/third_party/dawn/src/common/HashUtils.h
index c42b60e02e7..9d10ca713c2 100644
--- a/chromium/third_party/dawn/src/common/HashUtils.h
+++ b/chromium/third_party/dawn/src/common/HashUtils.h
@@ -16,6 +16,8 @@
#define COMMON_HASHUTILS_H_
#include "common/Platform.h"
+#include "common/TypedInteger.h"
+#include "common/ityp_bitset.h"
#include <bitset>
#include <functional>
@@ -27,6 +29,12 @@ size_t Hash(const T& value) {
return std::hash<T>()(value);
}
+// Add hashing of TypedIntegers
+template <typename Tag, typename T>
+size_t Hash(const TypedInteger<Tag, T>& value) {
+ return Hash(static_cast<T>(value));
+}
+
// When hashing sparse structures we want to iteratively build a hash value with only parts of the
// data. HashCombine "hashes" together an existing hash and hashable values.
//
@@ -80,4 +88,14 @@ size_t Hash(const std::bitset<N>& value) {
}
#endif
+namespace std {
+ template <typename Index, size_t N>
+ class hash<ityp::bitset<Index, N>> {
+ public:
+ size_t operator()(const ityp::bitset<Index, N>& value) const {
+ return Hash(static_cast<const std::bitset<N>&>(value));
+ }
+ };
+} // namespace std
+
#endif // COMMON_HASHUTILS_H_
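Together, the two additions let typed integers and typed bitsets flow through Dawn's generic hashing. A sketch of how they compose (the KeyTag tag and the function are hypothetical):

    #include "common/HashUtils.h"

    using Key = TypedInteger<struct KeyTag, uint32_t>;

    size_t HashKeyAndBits(Key key, const ityp::bitset<Key, 8>& bits) {
        size_t hash = Hash(key);   // picks the new TypedInteger overload in Debug builds
        HashCombine(&hash, bits);  // routes through the new std::hash<ityp::bitset> specialization
        return hash;
    }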
diff --git a/chromium/third_party/dawn/src/common/SystemUtils.cpp b/chromium/third_party/dawn/src/common/SystemUtils.cpp
index 73aa4ee640d..f8282eb4141 100644
--- a/chromium/third_party/dawn/src/common/SystemUtils.cpp
+++ b/chromium/third_party/dawn/src/common/SystemUtils.cpp
@@ -14,6 +14,8 @@
#include "common/SystemUtils.h"
+#include "common/Assert.h"
+
#if defined(DAWN_PLATFORM_WINDOWS)
# include <Windows.h>
# include <vector>
@@ -115,3 +117,27 @@ std::string GetExecutableDirectory() {
size_t lastPathSepLoc = exePath.find_last_of(GetPathSeparator());
return lastPathSepLoc != std::string::npos ? exePath.substr(0, lastPathSepLoc + 1) : "";
}
+
+// ScopedEnvironmentVar
+
+ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
+ : mName(variableName),
+ mOriginalValue(GetEnvironmentVar(variableName)),
+ mIsSet(SetEnvironmentVar(variableName, value)) {
+}
+
+ScopedEnvironmentVar::~ScopedEnvironmentVar() {
+ if (mIsSet) {
+ bool success = SetEnvironmentVar(mName.c_str(), mOriginalValue.c_str());
+ // Having set the environment variable in the constructor, restoring it should never fail.
+ ASSERT(success);
+ }
+}
+
+bool ScopedEnvironmentVar::Set(const char* variableName, const char* value) {
+ ASSERT(!mIsSet);
+ mName = variableName;
+ mOriginalValue = GetEnvironmentVar(variableName);
+ mIsSet = SetEnvironmentVar(variableName, value);
+ return mIsSet;
+}
diff --git a/chromium/third_party/dawn/src/common/SystemUtils.h b/chromium/third_party/dawn/src/common/SystemUtils.h
index 2edf1e3a257..ed18c31e661 100644
--- a/chromium/third_party/dawn/src/common/SystemUtils.h
+++ b/chromium/third_party/dawn/src/common/SystemUtils.h
@@ -24,4 +24,21 @@ std::string GetEnvironmentVar(const char* variableName);
bool SetEnvironmentVar(const char* variableName, const char* value);
std::string GetExecutableDirectory();
+class ScopedEnvironmentVar {
+ public:
+ ScopedEnvironmentVar() = default;
+ ScopedEnvironmentVar(const char* variableName, const char* value);
+ ~ScopedEnvironmentVar();
+
+ ScopedEnvironmentVar(const ScopedEnvironmentVar& rhs) = delete;
+ ScopedEnvironmentVar& operator=(const ScopedEnvironmentVar& rhs) = delete;
+
+ bool Set(const char* variableName, const char* value);
+
+ private:
+ std::string mName;
+ std::string mOriginalValue;
+ bool mIsSet = false;
+};
+
#endif // COMMON_SYSTEMUTILS_H_
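ScopedEnvironmentVar is an RAII guard: the constructor records the variable's current value before overriding it, and the destructor restores the recorded value. A usage sketch (the variable name and value are made up for illustration):

    #include "common/SystemUtils.h"

    void RunWithOverride() {
        ScopedEnvironmentVar var("DAWN_FAKE_VAR", "1");  // hypothetical variable
        // Code here sees GetEnvironmentVar("DAWN_FAKE_VAR") == "1".
    }  // destructor restores the previous value on scope exit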
diff --git a/chromium/third_party/dawn/src/common/TypedInteger.h b/chromium/third_party/dawn/src/common/TypedInteger.h
new file mode 100644
index 00000000000..5474d9a920b
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/TypedInteger.h
@@ -0,0 +1,212 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_TYPEDINTEGER_H_
+#define COMMON_TYPEDINTEGER_H_
+
+#include "common/Assert.h"
+
+#include <limits>
+#include <type_traits>
+
+// TypedInteger is a helper class that provides additional type safety in Debug.
+// - Integers of different (Tag, BaseIntegerType) may not be used interoperably.
+// - Allows casts only to the underlying type.
+// - Integers of the same (Tag, BaseIntegerType) may be compared or assigned.
+// This class helps ensure that the many types of indices in Dawn aren't mixed up and used
+// interchangeably.
+// In Release builds, when DAWN_ENABLE_ASSERTS is not defined, TypedInteger is a passthrough
+// typedef of the underlying type.
+//
+// Example:
+// using UintA = TypedInteger<struct TypeA, uint32_t>;
+// using UintB = TypedInteger<struct TypeB, uint32_t>;
+//
+// in Release:
+// using UintA = uint32_t;
+// using UintB = uint32_t;
+//
+// in Debug:
+// using UintA = detail::TypedIntegerImpl<struct TypeA, uint32_t>;
+// using UintB = detail::TypedIntegerImpl<struct TypeB, uint32_t>;
+//
+// Assignment, construction, comparison, and arithmetic with TypedIntegerImpl are allowed
+// only for typed integers of exactly the same type. Further, they must be
+// created / cast explicitly; there is no implicit conversion.
+//
+// UintA a(2);
+// uint32_t aValue = static_cast<uint32_t>(a);
+//
+namespace detail {
+ template <typename Tag, typename T>
+ class TypedIntegerImpl;
+} // namespace detail
+
+template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
+#if defined(DAWN_ENABLE_ASSERTS)
+using TypedInteger = detail::TypedIntegerImpl<Tag, T>;
+#else
+using TypedInteger = T;
+#endif
+
+namespace detail {
+ template <typename Tag, typename T>
+ class alignas(T) TypedIntegerImpl {
+ static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
+ T mValue;
+
+ public:
+ constexpr TypedIntegerImpl() : mValue(0) {
+ static_assert(alignof(TypedIntegerImpl) == alignof(T), "");
+ static_assert(sizeof(TypedIntegerImpl) == sizeof(T), "");
+ }
+
+ // Construction from non-narrowing integral types.
+ template <typename I,
+ typename = std::enable_if_t<
+ std::is_integral<I>::value &&
+ std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
+ std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
+ explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
+ }
+
+ // Allow explicit casts only to the underlying type. If you're casting out of a
+ // TypedInteger, you should know what you're doing, and exactly what type you
+ // expect.
+ explicit constexpr operator T() const {
+ return static_cast<T>(this->mValue);
+ }
+
+// Same-tag TypedInteger comparison operators
+#define TYPED_COMPARISON(op) \
+ constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
+ return mValue op rhs.mValue; \
+ }
+ TYPED_COMPARISON(<)
+ TYPED_COMPARISON(<=)
+ TYPED_COMPARISON(>)
+ TYPED_COMPARISON(>=)
+ TYPED_COMPARISON(==)
+ TYPED_COMPARISON(!=)
+#undef TYPED_COMPARISON
+
+ // Increment / decrement operators for for-loop iteration
+ constexpr TypedIntegerImpl& operator++() {
+ ASSERT(this->mValue < std::numeric_limits<T>::max());
+ ++this->mValue;
+ return *this;
+ }
+
+ constexpr TypedIntegerImpl operator++(int) {
+ TypedIntegerImpl ret = *this;
+
+ ASSERT(this->mValue < std::numeric_limits<T>::max());
+ ++this->mValue;
+ return ret;
+ }
+
+ constexpr TypedIntegerImpl& operator--() {
+ ASSERT(this->mValue > std::numeric_limits<T>::min());
+ --this->mValue;
+ return *this;
+ }
+
+ constexpr TypedIntegerImpl operator--(int) {
+ TypedIntegerImpl ret = *this;
+
+ ASSERT(this->mValue > std::numeric_limits<T>::min());
+ --this->mValue;
+ return ret;
+ }
+
+ template <typename T2 = T>
+ constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
+ static_assert(std::is_same<T, T2>::value, "");
+ // The negation of the most negative value cannot be represented.
+ ASSERT(this->mValue != std::numeric_limits<T>::min());
+ return TypedIntegerImpl(-this->mValue);
+ }
+
+ template <typename T2 = T>
+ constexpr std::enable_if_t<std::is_unsigned<T2>::value, TypedIntegerImpl> operator+(
+ TypedIntegerImpl rhs) const {
+ static_assert(std::is_same<T, T2>::value, "");
+ // Overflow would wrap around
+ ASSERT(this->mValue + rhs.mValue >= this->mValue);
+
+ return TypedIntegerImpl(this->mValue + rhs.mValue);
+ }
+
+ template <typename T2 = T>
+ constexpr std::enable_if_t<std::is_unsigned<T2>::value, TypedIntegerImpl> operator-(
+ TypedIntegerImpl rhs) const {
+ static_assert(std::is_same<T, T2>::value, "");
+ // Overflow would wrap around
+ ASSERT(this->mValue - rhs.mValue <= this->mValue);
+ return TypedIntegerImpl(this->mValue - rhs.mValue);
+ }
+
+ template <typename T2 = T>
+ constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator+(
+ TypedIntegerImpl rhs) const {
+ static_assert(std::is_same<T, T2>::value, "");
+ if (this->mValue > 0) {
+ // rhs is positive: |rhs| is at most the distance between max and |this|.
+ // rhs is negative: (positive + negative) won't overflow
+ ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - this->mValue);
+ } else {
+ // rhs is positive: (negative + positive) won't underflow
+ // rhs is negative: |rhs| isn't less than the (negative) distance between min
+ // and |this|
+ ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - this->mValue);
+ }
+ return TypedIntegerImpl(this->mValue + rhs.mValue);
+ }
+
+ template <typename T2 = T>
+ constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-(
+ TypedIntegerImpl rhs) const {
+ static_assert(std::is_same<T, T2>::value, "");
+ if (this->mValue > 0) {
+ // rhs is positive: positive minus positive won't overflow
+ // rhs is negative: |rhs| isn't less than the (negative) distance between |this|
+ // and max.
+ ASSERT(rhs.mValue >= this->mValue - std::numeric_limits<T>::max());
+ } else {
+ // rhs is positive: |rhs| is at most the distance between min and |this|
+ // rhs is negative: negative minus negative won't overflow
+ ASSERT(rhs.mValue <= this->mValue - std::numeric_limits<T>::min());
+ }
+ return TypedIntegerImpl(this->mValue - rhs.mValue);
+ }
+ };
+
+} // namespace detail
+
+namespace std {
+
+ template <typename Tag, typename T>
+ class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
+ public:
+ static detail::TypedIntegerImpl<Tag, T> max() noexcept {
+ return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
+ }
+ static detail::TypedIntegerImpl<Tag, T> min() noexcept {
+ return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
+ }
+ };
+
+} // namespace std
+
+#endif // COMMON_TYPEDINTEGER_H_
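To make the tag mechanism concrete, a sketch of two same-width but differently tagged integers (the tag names are illustrative; the checked behavior applies when DAWN_ENABLE_ASSERTS is defined):

    #include "common/TypedInteger.h"

    using UintA = TypedInteger<struct TagA, uint32_t>;
    using UintB = TypedInteger<struct TagB, uint32_t>;

    void Example() {
        UintA a{2};
        UintB b{3};
        // a = b;            // compile error in Debug: the tags differ
        // uint32_t u = a;   // compile error: no implicit conversion out
        uint32_t raw = static_cast<uint32_t>(a);  // explicit cast out is allowed
        ++a;                 // asserts against overflow in Debug
        (void)raw;
        (void)b;
    }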
diff --git a/chromium/third_party/dawn/src/common/UnderlyingType.h b/chromium/third_party/dawn/src/common/UnderlyingType.h
new file mode 100644
index 00000000000..09c72c023f9
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/UnderlyingType.h
@@ -0,0 +1,51 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_UNDERLYINGTYPE_H_
+#define COMMON_UNDERLYINGTYPE_H_
+
+#include <type_traits>
+
+// UnderlyingType is similar to std::underlying_type_t. It is a passthrough for already
+// integer types which simplifies getting the underlying primitive type for an arbitrary
+// template parameter. It includes a specialization for detail::TypedIntegerImpl which yields
+// the wrapped integer type.
+namespace detail {
+ template <typename T, typename Enable = void>
+ struct UnderlyingTypeImpl;
+
+ template <typename I>
+ struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
+ using type = I;
+ };
+
+ template <typename E>
+ struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
+ using type = std::underlying_type_t<E>;
+ };
+
+ // Forward declare the TypedInteger impl.
+ template <typename Tag, typename T>
+ class TypedIntegerImpl;
+
+ template <typename Tag, typename I>
+ struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
+ using type = typename UnderlyingTypeImpl<I>::type;
+ };
+} // namespace detail
+
+template <typename T>
+using UnderlyingType = typename detail::UnderlyingTypeImpl<T>::type;
+
+#endif // COMMON_UNDERLYINGTYPE_H_
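A few static_asserts showing what the trait yields for each supported category; these hold in both build modes, since a Release TypedInteger is already its base type (the Channel enum and Size alias are illustrative):

    #include <cstdint>
    #include "common/TypedInteger.h"
    #include "common/UnderlyingType.h"

    enum class Channel : uint8_t { R, G, B };
    using Size = TypedInteger<struct SizeTag, uint64_t>;

    static_assert(std::is_same<UnderlyingType<int32_t>, int32_t>::value, "");
    static_assert(std::is_same<UnderlyingType<Channel>, uint8_t>::value, "");
    static_assert(std::is_same<UnderlyingType<Size>, uint64_t>::value, "");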
diff --git a/chromium/third_party/dawn/src/common/ityp_array.h b/chromium/third_party/dawn/src/common/ityp_array.h
new file mode 100644
index 00000000000..d413ebc0ba0
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/ityp_array.h
@@ -0,0 +1,96 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_ARRAY_H_
+#define COMMON_ITYP_ARRAY_H_
+
+#include "common/TypedInteger.h"
+#include "common/UnderlyingType.h"
+
+#include <array>
+#include <type_traits>
+
+namespace ityp {
+
+ // ityp::array is a helper class that wraps std::array with the restriction that
+ // indices must be a particular type |Index|. Dawn uses multiple flat maps of
+ // index-->data, and this class helps ensure indices cannot be passed interchangeably
+ // to a flat map of a different type.
+ template <typename Index, typename Value, size_t Size>
+ class array : private std::array<Value, Size> {
+ using I = UnderlyingType<Index>;
+ using Base = std::array<Value, Size>;
+
+ static_assert(Size <= std::numeric_limits<I>::max(), "");
+
+ public:
+ constexpr array() = default;
+
+ template <typename... Values>
+ constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
+ }
+
+ Value& operator[](Index i) {
+ I index = static_cast<I>(i);
+ ASSERT(index >= 0 && index < Size);
+ return Base::operator[](index);
+ }
+
+ constexpr const Value& operator[](Index i) const {
+ I index = static_cast<I>(i);
+ ASSERT(index >= 0 && index < Size);
+ return Base::operator[](index);
+ }
+
+ Value& at(Index i) {
+ I index = static_cast<I>(i);
+ ASSERT(index >= 0 && index < Size);
+ return Base::at(index);
+ }
+
+ constexpr const Value& at(Index i) const {
+ I index = static_cast<I>(i);
+ ASSERT(index >= 0 && index < Size);
+ return Base::at(index);
+ }
+
+ Value* begin() noexcept {
+ return Base::begin();
+ }
+
+ const Value* begin() const noexcept {
+ return Base::begin();
+ }
+
+ Value* end() noexcept {
+ return Base::end();
+ }
+
+ const Value* end() const noexcept {
+ return Base::end();
+ }
+
+ constexpr Index size() const {
+ return Index(static_cast<I>(Size));
+ }
+
+ using Base::back;
+ using Base::data;
+ using Base::empty;
+ using Base::front;
+ };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_ARRAY_H_
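A sketch of the indexing discipline this buys (the AttachmentIndex alias is hypothetical): the array accepts only its own index type, so an index meant for a different map cannot slip in.

    #include "common/ityp_array.h"

    using AttachmentIndex = TypedInteger<struct AttachmentIndexTag, uint8_t>;

    void Example() {
        ityp::array<AttachmentIndex, uint64_t, 8> offsets = {};
        for (AttachmentIndex i{0}; i < offsets.size(); ++i) {
            offsets[i] = 0;  // in-range access; Debug builds also ASSERT the bounds
        }
        // offsets[3u] would not compile in Debug: a raw integer is not an AttachmentIndex.
    }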
diff --git a/chromium/third_party/dawn/src/common/ityp_bitset.h b/chromium/third_party/dawn/src/common/ityp_bitset.h
new file mode 100644
index 00000000000..ef351d47d76
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/ityp_bitset.h
@@ -0,0 +1,134 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_BITSET_H_
+#define COMMON_ITYP_BITSET_H_
+
+#include "common/BitSetIterator.h"
+#include "common/TypedInteger.h"
+#include "common/UnderlyingType.h"
+
+namespace ityp {
+
+ // ityp::bitset is a helper class that wraps std::bitset with the restriction that
+ // indices must be a particular type |Index|.
+ template <typename Index, size_t N>
+ class bitset : private std::bitset<N> {
+ using I = UnderlyingType<Index>;
+ using Base = std::bitset<N>;
+
+ static_assert(sizeof(I) <= sizeof(size_t), "");
+
+ constexpr bitset(const Base& rhs) : Base(rhs) {
+ }
+
+ public:
+ constexpr bitset() noexcept : Base() {
+ }
+
+ constexpr bitset(unsigned long long value) noexcept : Base(value) {
+ }
+
+ constexpr bool operator[](Index i) const {
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ typename Base::reference operator[](Index i) {
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ bool test(Index i) const {
+ return Base::test(static_cast<I>(i));
+ }
+
+ using Base::all;
+ using Base::any;
+ using Base::count;
+ using Base::none;
+ using Base::size;
+
+ bool operator==(const bitset& other) const noexcept {
+ return Base::operator==(static_cast<const Base&>(other));
+ }
+
+ bool operator!=(const bitset& other) const noexcept {
+ return Base::operator!=(static_cast<const Base&>(other));
+ }
+
+ bitset& operator&=(const bitset& other) noexcept {
+ return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
+ }
+
+ bitset& operator|=(const bitset& other) noexcept {
+ return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
+ }
+
+ bitset& operator^=(const bitset& other) noexcept {
+ return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
+ }
+
+ bitset operator~() const noexcept {
+ return bitset(*this).flip();
+ }
+
+ bitset& set() noexcept {
+ return static_cast<bitset&>(Base::set());
+ }
+
+ bitset& set(Index i, bool value = true) {
+ return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
+ }
+
+ bitset& reset() noexcept {
+ return static_cast<bitset&>(Base::reset());
+ }
+
+ bitset& reset(Index i) {
+ return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
+ }
+
+ bitset& flip() noexcept {
+ return static_cast<bitset&>(Base::flip());
+ }
+
+ bitset& flip(Index i) {
+ return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
+ }
+
+ using Base::to_string;
+ using Base::to_ullong;
+ using Base::to_ulong;
+
+ friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
+ return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
+ }
+
+ friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
+ return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
+ }
+
+ friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
+ return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
+ }
+
+ friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
+ return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
+ }
+
+ friend class std::hash<bitset>;
+ };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_BITSET_H_
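This composes with the BitSetIterator change earlier in the patch: iterating a typed bitset yields indices of type |Index| instead of raw integers. A sketch (the BindingIndex tag here is illustrative):

    #include "common/ityp_bitset.h"

    using BindingIndex = TypedInteger<struct BindingIndexTag, uint32_t>;

    void Example() {
        ityp::bitset<BindingIndex, 16> dynamicBindings;
        dynamicBindings.set(BindingIndex{2});
        dynamicBindings.set(BindingIndex{5});

        for (BindingIndex i : IterateBitSet(dynamicBindings)) {
            // i takes the values BindingIndex{2} and BindingIndex{5}; a bare
            // uint32_t loop variable would not compile in Debug builds.
            (void)i;
        }
    }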
diff --git a/chromium/third_party/dawn/src/common/ityp_span.h b/chromium/third_party/dawn/src/common/ityp_span.h
new file mode 100644
index 00000000000..00ba93f7503
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/ityp_span.h
@@ -0,0 +1,103 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_SPAN_H_
+#define COMMON_ITYP_SPAN_H_
+
+#include "common/TypedInteger.h"
+#include "common/UnderlyingType.h"
+
+#include <type_traits>
+
+namespace ityp {
+
+ // ityp::span is a helper class that wraps an unowned packed array of type |Value|.
+ // It stores the size and pointer to first element. It has the restriction that
+ // indices must be a particular type |Index|. This provides a type-safe way to index
+ // raw pointers.
+ template <typename Index, typename Value>
+ class span {
+ using I = UnderlyingType<Index>;
+
+ public:
+ constexpr span() : mData(nullptr), mSize(0) {
+ }
+ constexpr span(Value* data, Index size) : mData(data), mSize(size) {
+ }
+
+ constexpr Value& operator[](Index i) const {
+ ASSERT(i < mSize);
+ return mData[static_cast<I>(i)];
+ }
+
+ Value* data() noexcept {
+ return mData;
+ }
+
+ const Value* data() const noexcept {
+ return mData;
+ }
+
+ Value* begin() noexcept {
+ return mData;
+ }
+
+ const Value* begin() const noexcept {
+ return mData;
+ }
+
+ Value* end() noexcept {
+ return mData + static_cast<I>(mSize);
+ }
+
+ const Value* end() const noexcept {
+ return mData + static_cast<I>(mSize);
+ }
+
+ Value& front() {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) > 0);
+ return *mData;
+ }
+
+ const Value& front() const {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) > 0);
+ return *mData;
+ }
+
+ Value& back() {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) > 0);
+ return *(mData + static_cast<I>(mSize) - 1);
+ }
+
+ const Value& back() const {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) > 0);
+ return *(mData + static_cast<I>(mSize) - 1);
+ }
+
+ Index size() const {
+ return mSize;
+ }
+
+ private:
+ Value* mData;
+ Index mSize;
+ };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_SPAN_H_
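A sketch of how the span gives raw storage a typed index (the alias and helper below are hypothetical):

    #include "common/ityp_span.h"

    using BindingIndex = TypedInteger<struct BindingIndexTag, uint32_t>;

    void ZeroAll(ityp::span<BindingIndex, uint64_t> sizes) {
        for (BindingIndex i{0}; i < sizes.size(); ++i) {
            sizes[i] = 0;  // operator[] ASSERTs i < size() in Debug builds
        }
    }

    void Example() {
        uint64_t storage[4] = {};
        ZeroAll(ityp::span<BindingIndex, uint64_t>(storage, BindingIndex{4}));
    }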
diff --git a/chromium/third_party/dawn/src/common/windows_with_undefs.h b/chromium/third_party/dawn/src/common/windows_with_undefs.h
index 381116a0243..6d8649ca8dd 100644
--- a/chromium/third_party/dawn/src/common/windows_with_undefs.h
+++ b/chromium/third_party/dawn/src/common/windows_with_undefs.h
@@ -15,7 +15,7 @@
#ifndef COMMON_WINDOWS_WITH_UNDEFS_H_
#define COMMON_WINDOWS_WITH_UNDEFS_H_
-#include "common/Compiler.h"
+#include "common/Platform.h"
#if !defined(DAWN_PLATFORM_WINDOWS)
# error "windows_with_undefs.h included on non-Windows"
diff --git a/chromium/third_party/dawn/src/dawn_native/BUILD.gn b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
index 8e97fae1316..f6974c8a3d0 100644
--- a/chromium/third_party/dawn/src/dawn_native/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
@@ -215,6 +215,8 @@ source_set("dawn_native_sources") {
"Forward.h",
"Instance.cpp",
"Instance.h",
+ "MapRequestTracker.cpp",
+ "MapRequestTracker.h",
"ObjectBase.cpp",
"ObjectBase.h",
"PassResourceUsage.h",
@@ -228,6 +230,8 @@ source_set("dawn_native_sources") {
"PipelineLayout.h",
"ProgrammablePassEncoder.cpp",
"ProgrammablePassEncoder.h",
+ "QuerySet.cpp",
+ "QuerySet.h",
"Queue.cpp",
"Queue.h",
"RenderBundle.cpp",
@@ -310,6 +314,8 @@ source_set("dawn_native_sources") {
"d3d12/HeapD3D12.h",
"d3d12/NativeSwapChainImplD3D12.cpp",
"d3d12/NativeSwapChainImplD3D12.h",
+ "d3d12/PageableD3D12.cpp",
+ "d3d12/PageableD3D12.h",
"d3d12/PipelineLayoutD3D12.cpp",
"d3d12/PipelineLayoutD3D12.h",
"d3d12/PlatformFunctions.cpp",
@@ -328,6 +334,8 @@ source_set("dawn_native_sources") {
"d3d12/ResourceHeapAllocationD3D12.h",
"d3d12/SamplerD3D12.cpp",
"d3d12/SamplerD3D12.h",
+ "d3d12/SamplerHeapCacheD3D12.cpp",
+ "d3d12/SamplerHeapCacheD3D12.h",
"d3d12/ShaderModuleD3D12.cpp",
"d3d12/ShaderModuleD3D12.h",
"d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp",
@@ -436,6 +444,8 @@ source_set("dawn_native_sources") {
"opengl/PipelineGL.h",
"opengl/PipelineLayoutGL.cpp",
"opengl/PipelineLayoutGL.h",
+ "opengl/QuerySetGL.cpp",
+ "opengl/QuerySetGL.h",
"opengl/QueueGL.cpp",
"opengl/QueueGL.h",
"opengl/RenderPipelineGL.cpp",
@@ -509,6 +519,8 @@ source_set("dawn_native_sources") {
"vulkan/UtilsVulkan.h",
"vulkan/VulkanError.cpp",
"vulkan/VulkanError.h",
+ "vulkan/VulkanExtensions.cpp",
+ "vulkan/VulkanExtensions.h",
"vulkan/VulkanFunctions.cpp",
"vulkan/VulkanFunctions.h",
"vulkan/VulkanInfo.cpp",
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
index 840c4047c38..4e4e67a447a 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
@@ -16,6 +16,7 @@
#include "common/Assert.h"
#include "common/Math.h"
+#include "common/ityp_bitset.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/Device.h"
@@ -30,7 +31,8 @@ namespace dawn_native {
MaybeError ValidateBufferBinding(const DeviceBase* device,
const BindGroupEntry& entry,
- wgpu::BufferUsage requiredUsage) {
+ wgpu::BufferUsage requiredUsage,
+ const BindingInfo& bindingInfo) {
if (entry.buffer == nullptr || entry.sampler != nullptr ||
entry.textureView != nullptr) {
return DAWN_VALIDATION_ERROR("expected buffer binding");
@@ -69,6 +71,14 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("buffer binding usage mismatch");
}
+ if (bindingSize < bindingInfo.minBufferBindingSize) {
+ return DAWN_VALIDATION_ERROR(
+ "Binding size smaller than minimum buffer size: binding " +
+ std::to_string(entry.binding) + " given " + std::to_string(bindingSize) +
+ " bytes, required " + std::to_string(bindingInfo.minBufferBindingSize) +
+ " bytes");
+ }
+
return {};
}
@@ -153,13 +163,14 @@ namespace dawn_native {
}
DAWN_TRY(device->ValidateObject(descriptor->layout));
- if (descriptor->entryCount != descriptor->layout->GetBindingCount()) {
+
+ if (BindingIndex(descriptor->entryCount) != descriptor->layout->GetBindingCount()) {
return DAWN_VALIDATION_ERROR("numBindings mismatch");
}
const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
- std::bitset<kMaxBindingsPerGroup> bindingsSet;
+ ityp::bitset<BindingIndex, kMaxBindingsPerGroup> bindingsSet;
for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
const BindGroupEntry& entry = descriptor->entries[i];
@@ -180,11 +191,13 @@ namespace dawn_native {
// Perform binding-type specific validation.
switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
- DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Uniform));
+ DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Uniform,
+ bindingInfo));
break;
case wgpu::BindingType::StorageBuffer:
case wgpu::BindingType::ReadonlyStorageBuffer:
- DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Storage));
+ DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Storage,
+ bindingInfo));
break;
case wgpu::BindingType::SampledTexture:
DAWN_TRY(ValidateTextureBinding(device, entry, wgpu::TextureUsage::Sampled,
@@ -194,8 +207,6 @@ namespace dawn_native {
case wgpu::BindingType::ComparisonSampler:
DAWN_TRY(ValidateSamplerBinding(device, entry, bindingInfo.type));
break;
- // TODO(jiawei.shao@intel.com): support creating bind group with read-only and
- // write-only storage textures.
case wgpu::BindingType::ReadonlyStorageTexture:
case wgpu::BindingType::WriteonlyStorageTexture:
DAWN_TRY(ValidateTextureBinding(device, entry, wgpu::TextureUsage::Storage,
@@ -225,7 +236,7 @@ namespace dawn_native {
: ObjectBase(device),
mLayout(descriptor->layout),
mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
- for (BindingIndex i = 0; i < mLayout->GetBindingCount(); ++i) {
+ for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
// TODO(enga): Shouldn't be needed when bindings are tightly packed.
// This is to fill Ref<ObjectBase> holes with nullptrs.
new (&mBindingData.bindings[i]) Ref<ObjectBase>();
@@ -264,12 +275,22 @@ namespace dawn_native {
continue;
}
}
+
+ uint32_t packedIdx = 0;
+ for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
+ ++bindingIndex) {
+ if (descriptor->layout->GetBindingInfo(bindingIndex).minBufferBindingSize == 0) {
+ mBindingData.unverifiedBufferSizes[packedIdx] =
+ mBindingData.bufferData[bindingIndex].size;
+ ++packedIdx;
+ }
+ }
}
BindGroupBase::~BindGroupBase() {
if (mLayout) {
ASSERT(!IsError());
- for (BindingIndex i = 0; i < mLayout->GetBindingCount(); ++i) {
+ for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
mBindingData.bindings[i].~Ref<ObjectBase>();
}
}
@@ -297,6 +318,16 @@ namespace dawn_native {
return mLayout.Get();
}
+ const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
+ ASSERT(!IsError());
+ return mLayout.Get();
+ }
+
+ const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
+ ASSERT(!IsError());
+ return mBindingData.unverifiedBufferSizes;
+ }
+
BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
ASSERT(!IsError());
ASSERT(bindingIndex < mLayout->GetBindingCount());
@@ -309,7 +340,7 @@ namespace dawn_native {
mBindingData.bufferData[bindingIndex].size};
}
- SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) {
+ SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
ASSERT(!IsError());
ASSERT(bindingIndex < mLayout->GetBindingCount());
ASSERT(mLayout->GetBindingInfo(bindingIndex).type == wgpu::BindingType::Sampler ||
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.h b/chromium/third_party/dawn/src/dawn_native/BindGroup.h
index 6afee610836..c29bbeb3aa9 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.h
@@ -44,9 +44,11 @@ namespace dawn_native {
static BindGroupBase* MakeError(DeviceBase* device);
BindGroupLayoutBase* GetLayout();
+ const BindGroupLayoutBase* GetLayout() const;
BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
- SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex);
+ SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
+ const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
protected:
// To save memory, the size of a bind group is dynamically determined and the bind group is
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h b/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
index 8227d84953d..e17d26349f1 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
@@ -15,6 +15,7 @@
#ifndef DAWNNATIVE_BINDGROUPANDSTORAGEBARRIERTRACKER_H_
#define DAWNNATIVE_BINDGROUPANDSTORAGEBARRIERTRACKER_H_
+#include "common/ityp_bitset.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/BindGroupTracker.h"
#include "dawn_native/Buffer.h"
@@ -31,17 +32,19 @@ namespace dawn_native {
public:
BindGroupAndStorageBarrierTrackerBase() = default;
- void OnSetBindGroup(uint32_t index,
+ void OnSetBindGroup(BindGroupIndex index,
BindGroupBase* bindGroup,
uint32_t dynamicOffsetCount,
uint32_t* dynamicOffsets) {
+ ASSERT(index < kMaxBindGroupsTyped);
+
if (this->mBindGroups[index] != bindGroup) {
mBindings[index] = {};
mBindingsNeedingBarrier[index] = {};
const BindGroupLayoutBase* layout = bindGroup->GetLayout();
- for (BindingIndex bindingIndex = 0; bindingIndex < layout->GetBindingCount();
+ for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBindingCount();
++bindingIndex) {
const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
@@ -88,10 +91,17 @@ namespace dawn_native {
}
protected:
- std::array<std::bitset<kMaxBindingsPerGroup>, kMaxBindGroups> mBindingsNeedingBarrier = {};
- std::array<std::array<wgpu::BindingType, kMaxBindingsPerGroup>, kMaxBindGroups>
+ ityp::
+ array<BindGroupIndex, ityp::bitset<BindingIndex, kMaxBindingsPerGroup>, kMaxBindGroups>
+ mBindingsNeedingBarrier = {};
+ ityp::array<BindGroupIndex,
+ ityp::array<BindingIndex, wgpu::BindingType, kMaxBindingsPerGroup>,
+ kMaxBindGroups>
mBindingTypes = {};
- std::array<std::array<ObjectBase*, kMaxBindingsPerGroup>, kMaxBindGroups> mBindings = {};
+ ityp::array<BindGroupIndex,
+ ityp::array<BindingIndex, ObjectBase*, kMaxBindingsPerGroup>,
+ kMaxBindGroups>
+ mBindings = {};
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
index 8fb923a2ff5..fce8b903293 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
@@ -19,6 +19,7 @@
#include "dawn_native/Device.h"
#include "dawn_native/ValidationUtils_autogen.h"
+#include <algorithm>
#include <functional>
#include <set>
@@ -67,6 +68,9 @@ namespace dawn_native {
switch (bindingType) {
case wgpu::BindingType::ReadonlyStorageTexture:
case wgpu::BindingType::WriteonlyStorageTexture: {
+ if (storageTextureFormat == wgpu::TextureFormat::Undefined) {
+ return DAWN_VALIDATION_ERROR("Storage texture format is missing");
+ }
DAWN_TRY(ValidateTextureFormat(storageTextureFormat));
const Format* format = nullptr;
@@ -93,6 +97,96 @@ namespace dawn_native {
return {};
}
+ MaybeError ValidateStorageTextureViewDimension(wgpu::BindingType bindingType,
+ wgpu::TextureViewDimension dimension) {
+ switch (bindingType) {
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ break;
+ }
+
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::ComparisonSampler:
+ case wgpu::BindingType::SampledTexture:
+ return {};
+
+ case wgpu::BindingType::StorageTexture:
+ default:
+ UNREACHABLE();
+ return {};
+ }
+
+ switch (dimension) {
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return DAWN_VALIDATION_ERROR(
+ "Cube map and cube map array texture views cannot be used as storage textures");
+
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e3D:
+ return {};
+
+ case wgpu::TextureViewDimension::Undefined:
+ default:
+ UNREACHABLE();
+ return {};
+ }
+ }
+
+ MaybeError ValidateBindingCanBeMultisampled(wgpu::BindingType bindingType,
+ wgpu::TextureViewDimension viewDimension) {
+ switch (bindingType) {
+ case wgpu::BindingType::SampledTexture:
+ break;
+
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
+ return DAWN_VALIDATION_ERROR("Storage texture bindings may not be multisampled");
+
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ return DAWN_VALIDATION_ERROR("Buffer bindings may not be multisampled");
+
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::ComparisonSampler:
+ return DAWN_VALIDATION_ERROR("Sampler bindings may not be multisampled");
+
+ case wgpu::BindingType::StorageTexture:
+ default:
+ UNREACHABLE();
+ return {};
+ }
+
+ switch (viewDimension) {
+ case wgpu::TextureViewDimension::e2D:
+ break;
+
+ case wgpu::TextureViewDimension::e2DArray:
+ return DAWN_VALIDATION_ERROR("2D array texture bindings may not be multisampled");
+
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return DAWN_VALIDATION_ERROR("Cube texture bindings may not be multisampled");
+
+ case wgpu::TextureViewDimension::e3D:
+ return DAWN_VALIDATION_ERROR("3D texture bindings may not be multisampled");
+
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::Undefined:
+ default:
+ UNREACHABLE();
+ return {};
+ }
+
+ return {};
+ }
+
MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor) {
if (descriptor->nextInChain != nullptr) {
@@ -110,8 +204,10 @@ namespace dawn_native {
DAWN_TRY(ValidateBindingType(entry.type));
DAWN_TRY(ValidateTextureComponentType(entry.textureComponentType));
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
if (entry.viewDimension != wgpu::TextureViewDimension::Undefined) {
DAWN_TRY(ValidateTextureViewDimension(entry.viewDimension));
+ viewDimension = entry.viewDimension;
}
if (bindingsSet.count(bindingNumber) != 0) {
@@ -122,6 +218,12 @@ namespace dawn_native {
DAWN_TRY(ValidateStorageTextureFormat(device, entry.type, entry.storageTextureFormat));
+ DAWN_TRY(ValidateStorageTextureViewDimension(entry.type, viewDimension));
+
+ if (entry.multisampled) {
+ DAWN_TRY(ValidateBindingCanBeMultisampled(entry.type, viewDimension));
+ }
+
switch (entry.type) {
case wgpu::BindingType::UniformBuffer:
if (entry.hasDynamicOffset) {
@@ -147,11 +249,6 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("storage textures aren't supported (yet)");
}
- if (entry.multisampled) {
- return DAWN_VALIDATION_ERROR(
- "BindGroupLayoutEntry::multisampled must be false (for now)");
- }
-
bindingsSet.insert(bindingNumber);
}
@@ -176,7 +273,8 @@ namespace dawn_native {
void HashCombineBindingInfo(size_t* hash, const BindingInfo& info) {
HashCombine(hash, info.hasDynamicOffset, info.multisampled, info.visibility, info.type,
- info.textureComponentType, info.viewDimension, info.storageTextureFormat);
+ info.textureComponentType, info.viewDimension, info.storageTextureFormat,
+ info.minBufferBindingSize);
}
bool operator!=(const BindingInfo& a, const BindingInfo& b) {
@@ -186,25 +284,57 @@ namespace dawn_native {
a.type != b.type || //
a.textureComponentType != b.textureComponentType || //
a.viewDimension != b.viewDimension || //
- a.storageTextureFormat != b.storageTextureFormat;
+ a.storageTextureFormat != b.storageTextureFormat || //
+ a.minBufferBindingSize != b.minBufferBindingSize;
}
- bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
- if (a.hasDynamicOffset != b.hasDynamicOffset) {
- // Buffers with dynamic offsets should come before those without.
- // This makes it easy to iterate over the dynamic buffer bindings
- // [0, dynamicBufferCount) during validation.
- return a.hasDynamicOffset > b.hasDynamicOffset;
- }
- if (a.type != b.type) {
- // Buffers have smaller type enums. They should be placed first.
- return a.type < b.type;
+ bool IsBufferBinding(wgpu::BindingType bindingType) {
+ switch (bindingType) {
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ return true;
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::ComparisonSampler:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
+ return false;
+ default:
+ UNREACHABLE();
+ return false;
}
- if (a.binding != b.binding) {
- // Above we ensure that dynamic buffers are first. Now, ensure that bindings are in
- // increasing order. This is because dynamic buffer offsets are applied in
- // increasing order of binding number.
- return a.binding < b.binding;
+ }
+
+ bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
+ const bool aIsBuffer = IsBufferBinding(a.type);
+ const bool bIsBuffer = IsBufferBinding(b.type);
+ if (aIsBuffer != bIsBuffer) {
+ // Always place buffers first.
+ return aIsBuffer;
+ } else {
+ if (aIsBuffer) {
+ ASSERT(bIsBuffer);
+ if (a.hasDynamicOffset != b.hasDynamicOffset) {
+ // Buffers with dynamic offsets should come before those without.
+ // This makes it easy to iterate over the dynamic buffer bindings
+ // [0, dynamicBufferCount) during validation.
+ return a.hasDynamicOffset;
+ }
+ if (a.hasDynamicOffset) {
+ ASSERT(b.hasDynamicOffset);
+ ASSERT(a.binding != b.binding);
+ // Above, we ensured that dynamic buffers are first. Now, ensure that
+ // dynamic buffer bindings are in increasing order. This is because dynamic
+ // buffer offsets are applied in increasing order of binding number.
+ return a.binding < b.binding;
+ }
+ }
+ // Otherwise, sort by type.
+ if (a.type != b.type) {
+ return a.type < b.type;
+ }
}
if (a.visibility != b.visibility) {
return a.visibility < b.visibility;
@@ -221,34 +351,24 @@ namespace dawn_native {
if (a.storageTextureFormat != b.storageTextureFormat) {
return a.storageTextureFormat < b.storageTextureFormat;
}
+ if (a.minBufferBindingSize != b.minBufferBindingSize) {
+ return a.minBufferBindingSize < b.minBufferBindingSize;
+ }
return false;
}
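Taken together, the comparator defines a total order: dynamic buffers first in increasing binding number, then the remaining buffers, then non-buffer bindings, with type, visibility, and the other fields as tie-breakers. A small sketch with illustrative entries:

    // Given these entries (binding, type, hasDynamicOffset)...
    //   {7, UniformBuffer,  dynamic}
    //   {2, StorageBuffer,  dynamic}
    //   {5, UniformBuffer,  static }
    //   {0, SampledTexture         }
    // ...std::sort(entries.begin(), entries.end(), SortBindingsCompare) yields:
    //   {2, StorageBuffer,  dynamic}   // dynamic buffers first, by binding number
    //   {7, UniformBuffer,  dynamic}
    //   {5, UniformBuffer,  static }   // then non-dynamic buffers, ordered by type
    //   {0, SampledTexture         }   // non-buffer bindings last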
// This is a utility function to help ASSERT that the BGL-binding comparator places buffers
// first.
- bool CheckBufferBindingsFirst(const BindingInfo* bindings, BindingIndex count) {
- ASSERT(count <= kMaxBindingsPerGroup);
+ bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
+ ASSERT(bindings.size() <= BindingIndex(kMaxBindingsPerGroup));
- BindingIndex lastBufferIndex = 0;
+ BindingIndex lastBufferIndex{0};
BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
- for (BindingIndex i = 0; i < count; ++i) {
- switch (bindings[i].type) {
- case wgpu::BindingType::UniformBuffer:
- case wgpu::BindingType::StorageBuffer:
- case wgpu::BindingType::ReadonlyStorageBuffer:
- lastBufferIndex = std::max(i, lastBufferIndex);
- break;
- case wgpu::BindingType::SampledTexture:
- case wgpu::BindingType::Sampler:
- case wgpu::BindingType::ComparisonSampler:
- case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageTexture:
- case wgpu::BindingType::WriteonlyStorageTexture:
- firstNonBufferIndex = std::min(i, firstNonBufferIndex);
- break;
- default:
- UNREACHABLE();
- break;
+ for (BindingIndex i{0}; i < bindings.size(); ++i) {
+ if (IsBufferBinding(bindings[i].type)) {
+ lastBufferIndex = std::max(i, lastBufferIndex);
+ } else {
+ firstNonBufferIndex = std::min(i, firstNonBufferIndex);
}
}
@@ -269,13 +389,15 @@ namespace dawn_native {
std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
- for (BindingIndex i = 0; i < mBindingCount; ++i) {
- const BindGroupLayoutEntry& binding = sortedBindings[i];
+ for (BindingIndex i{0}; i < mBindingCount; ++i) {
+ const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
+ mBindingInfo[i].binding = BindingNumber(binding.binding);
mBindingInfo[i].type = binding.type;
mBindingInfo[i].visibility = binding.visibility;
mBindingInfo[i].textureComponentType =
Format::TextureComponentTypeToFormatType(binding.textureComponentType);
mBindingInfo[i].storageTextureFormat = binding.storageTextureFormat;
+ mBindingInfo[i].minBufferBindingSize = binding.minBufferBindingSize;
switch (binding.type) {
case wgpu::BindingType::UniformBuffer:
@@ -284,6 +406,9 @@ namespace dawn_native {
// Buffers must be contiguously packed at the start of the binding info.
ASSERT(mBufferCount == i);
++mBufferCount;
+ if (binding.minBufferBindingSize == 0) {
+ ++mUnverifiedBufferCount;
+ }
break;
default:
break;
@@ -320,7 +445,7 @@ namespace dawn_native {
const auto& it = mBindingMap.emplace(BindingNumber(binding.binding), i);
ASSERT(it.second);
}
- ASSERT(CheckBufferBindingsFirst(mBindingInfo.data(), mBindingCount));
+ ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), mBindingCount}));
}
BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -367,7 +492,7 @@ namespace dawn_native {
if (a->GetBindingCount() != b->GetBindingCount()) {
return false;
}
- for (BindingIndex i = 0; i < a->GetBindingCount(); ++i) {
+ for (BindingIndex i{0}; i < a->GetBindingCount(); ++i) {
if (a->mBindingInfo[i] != b->mBindingInfo[i]) {
return false;
}
@@ -379,8 +504,14 @@ namespace dawn_native {
return mBindingCount;
}
+ BindingIndex BindGroupLayoutBase::GetBufferCount() const {
+ return mBufferCount;
+ }
+
BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
- return mDynamicStorageBufferCount + mDynamicUniformBufferCount;
+ // This is a binding index because dynamic buffers are packed at the front of the binding
+ // info.
+ return static_cast<BindingIndex>(mDynamicStorageBufferCount + mDynamicUniformBufferCount);
}
uint32_t BindGroupLayoutBase::GetDynamicUniformBufferCount() const {
@@ -391,23 +522,41 @@ namespace dawn_native {
return mDynamicStorageBufferCount;
}
+ uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
+ return mUnverifiedBufferCount;
+ }
+
size_t BindGroupLayoutBase::GetBindingDataSize() const {
// | ------ buffer-specific ----------| ------------ object pointers -------------|
// | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
- size_t objectPointerStart = mBufferCount * sizeof(BufferBindingData);
+ // Followed by:
+ // |---------buffer size array--------|
+ // |-uint64_t[mUnverifiedBufferCount]-|
+ size_t objectPointerStart = static_cast<uint32_t>(mBufferCount) * sizeof(BufferBindingData);
ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
- return objectPointerStart + mBindingCount * sizeof(Ref<ObjectBase>);
+ size_t bufferSizeArrayStart = Align(
+ objectPointerStart + static_cast<uint32_t>(mBindingCount) * sizeof(Ref<ObjectBase>),
+ sizeof(uint64_t));
+ ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
+ return bufferSizeArrayStart + mUnverifiedBufferCount * sizeof(uint64_t);
}
BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
void* dataStart) const {
BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
- auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBufferCount);
+ auto bindings =
+ reinterpret_cast<Ref<ObjectBase>*>(bufferData + static_cast<uint32_t>(mBufferCount));
+ uint64_t* unverifiedBufferSizes =
+ AlignPtr(reinterpret_cast<uint64_t*>(bindings + static_cast<uint32_t>(mBindingCount)),
+ sizeof(uint64_t));
ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
+ ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
- return {bufferData, bindings};
+ return {{bufferData, mBufferCount},
+ {bindings, mBindingCount},
+ {unverifiedBufferSizes, mUnverifiedBufferCount}};
}
} // namespace dawn_native
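As a worked example of the binding-data layout computed above, assume a 64-bit build where sizeof(BufferBindingData) == 16 (offset + size) and sizeof(Ref<ObjectBase>) == 8 (one pointer), with mBufferCount = 2, mBindingCount = 3, and mUnverifiedBufferCount = 1:

    // objectPointerStart   = 2 * 16                = 32   (buffer offsets + sizes)
    // bufferSizeArrayStart = Align(32 + 3 * 8, 8)  = 56   (already 8-byte aligned)
    // GetBindingDataSize() = 56 + 1 * 8            = 64 bytes total
    // ComputeBindingDataPointers() then carves dataStart into:
    //   [ 0, 32)  BufferBindingData[2]
    //   [32, 56)  Ref<ObjectBase>[3]
    //   [56, 64)  uint64_t[1]  (unverified buffer sizes)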
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
index 21a79129a0f..4c3c4c63ca6 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
@@ -18,6 +18,8 @@
#include "common/Constants.h"
#include "common/Math.h"
#include "common/SlabAllocator.h"
+#include "common/ityp_array.h"
+#include "common/ityp_span.h"
#include "dawn_native/BindingInfo.h"
#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
@@ -25,7 +27,6 @@
#include "dawn_native/dawn_platform.h"
-#include <array>
#include <bitset>
#include <map>
@@ -42,6 +43,12 @@ namespace dawn_native {
wgpu::BindingType bindingType,
wgpu::TextureFormat storageTextureFormat);
+ MaybeError ValidateStorageTextureViewDimension(wgpu::BindingType bindingType,
+ wgpu::TextureViewDimension dimension);
+
+ MaybeError ValidateBindingCanBeMultisampled(wgpu::BindingType bindingType,
+ wgpu::TextureViewDimension viewDimension);
+
// Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
// These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
// into a packed range of |BindingIndex| integers.
@@ -57,7 +64,7 @@ namespace dawn_native {
const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
ASSERT(!IsError());
- ASSERT(bindingIndex < kMaxBindingsPerGroup);
+ ASSERT(bindingIndex < BindingIndex(kMaxBindingsPerGroup));
return mBindingInfo[bindingIndex];
}
const BindingMap& GetBindingMap() const;
@@ -72,10 +79,12 @@ namespace dawn_native {
};
BindingIndex GetBindingCount() const;
+ BindingIndex GetBufferCount() const;
// Returns |BindingIndex| because dynamic buffers are packed at the front.
BindingIndex GetDynamicBufferCount() const;
uint32_t GetDynamicUniformBufferCount() const;
uint32_t GetDynamicStorageBufferCount() const;
+ uint32_t GetUnverifiedBufferCount() const;
struct BufferBindingData {
uint64_t offset;
@@ -83,8 +92,9 @@ namespace dawn_native {
};
struct BindingDataPointers {
- BufferBindingData* const bufferData = nullptr;
- Ref<ObjectBase>* const bindings = nullptr;
+ ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
+ ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
+ ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
};
// Compute the amount of space / alignment required to store bindings for a bind group of
@@ -111,11 +121,12 @@ namespace dawn_native {
BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
BindingIndex mBindingCount;
- BindingIndex mBufferCount = 0; // |BindingIndex| because buffers are packed at the front.
+ BindingIndex mBufferCount{0}; // |BindingIndex| because buffers are packed at the front.
+ uint32_t mUnverifiedBufferCount = 0; // Buffers with minimum buffer size unspecified
uint32_t mDynamicUniformBufferCount = 0;
uint32_t mDynamicStorageBufferCount = 0;
- std::array<BindingInfo, kMaxBindingsPerGroup> mBindingInfo;
+ ityp::array<BindingIndex, BindingInfo, kMaxBindingsPerGroup> mBindingInfo;
// Map from BindGroupLayoutEntry.binding to packed indices.
BindingMap mBindingMap;
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h b/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
index 121dd0ffa8e..8d03ebf3614 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
@@ -32,11 +32,11 @@ namespace dawn_native {
template <bool CanInheritBindGroups, typename DynamicOffset>
class BindGroupTrackerBase {
public:
- void OnSetBindGroup(uint32_t index,
+ void OnSetBindGroup(BindGroupIndex index,
BindGroupBase* bindGroup,
uint32_t dynamicOffsetCount,
uint32_t* dynamicOffsets) {
- ASSERT(index < kMaxBindGroups);
+ ASSERT(index < kMaxBindGroupsTyped);
if (mBindGroupLayoutsMask[index]) {
// It is okay to only dirty bind groups that are used by the current pipeline
@@ -73,7 +73,7 @@ namespace dawn_native {
// the first |k| matching bind groups may be inherited.
if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
// Dirty bind groups that cannot be inherited.
- std::bitset<kMaxBindGroups> dirtiedGroups =
+ BindGroupLayoutMask dirtiedGroups =
~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
mDirtyBindGroups |= dirtiedGroups;
@@ -98,12 +98,12 @@ namespace dawn_native {
mLastAppliedPipelineLayout = mPipelineLayout;
}
- std::bitset<kMaxBindGroups> mDirtyBindGroups = 0;
- std::bitset<kMaxBindGroups> mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
- std::bitset<kMaxBindGroups> mBindGroupLayoutsMask = 0;
- std::array<BindGroupBase*, kMaxBindGroups> mBindGroups = {};
- std::array<uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
- std::array<std::array<DynamicOffset, kMaxBindingsPerGroup>, kMaxBindGroups>
+ BindGroupLayoutMask mDirtyBindGroups = 0;
+ BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
+ BindGroupLayoutMask mBindGroupLayoutsMask = 0;
+ ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
+ ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
+ ityp::array<BindGroupIndex, std::array<DynamicOffset, kMaxBindingsPerGroup>, kMaxBindGroups>
mDynamicOffsets = {};
// |mPipelineLayout| is the current pipeline layout set on the command buffer.
diff --git a/chromium/third_party/dawn/src/dawn_native/BindingInfo.h b/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
index 384bd8ad4e2..cac4f4ccaab 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
@@ -15,6 +15,9 @@
#ifndef DAWNNATIVE_BINDINGINFO_H_
#define DAWNNATIVE_BINDINGINFO_H_
+#include "common/Constants.h"
+#include "common/TypedInteger.h"
+#include "common/ityp_array.h"
#include "dawn_native/Format.h"
#include "dawn_native/dawn_platform.h"
@@ -22,16 +25,19 @@
namespace dawn_native {
- // TODO(enga): Can we have strongly typed integers so you can't convert between them
- // by accident? And also range-assertions (ex. kMaxBindingsPerGroup) in Debug?
-
// Binding numbers in the shader and BindGroup/BindGroupLayoutDescriptors
- using BindingNumber = uint32_t;
+ using BindingNumber = TypedInteger<struct BindingNumberT, uint32_t>;
// Binding numbers get mapped to a packed range of indices
- using BindingIndex = uint32_t;
+ using BindingIndex = TypedInteger<struct BindingIndexT, uint32_t>;
+
+ using BindGroupIndex = TypedInteger<struct BindGroupIndexT, uint32_t>;
+
+ static constexpr BindingIndex kMaxBindingsPerGroupTyped = BindingIndex(kMaxBindingsPerGroup);
+ static constexpr BindGroupIndex kMaxBindGroupsTyped = BindGroupIndex(kMaxBindGroups);
struct BindingInfo {
+ BindingNumber binding;
wgpu::ShaderStage visibility;
wgpu::BindingType type;
Format::Type textureComponentType = Format::Type::Float;
@@ -39,8 +45,12 @@ namespace dawn_native {
wgpu::TextureFormat storageTextureFormat = wgpu::TextureFormat::Undefined;
bool hasDynamicOffset = false;
bool multisampled = false;
+ uint64_t minBufferBindingSize = 0;
};
+ // For buffer size validation
+ using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
+
} // namespace dawn_native
#endif // DAWNNATIVE_BINDINGINFO_H_
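A minimal sketch of what the TypedInteger switch buys, assuming the semantics suggested by the usage in this diff (distinct tag types, explicit casts):

    using BindingNumber = TypedInteger<struct BindingNumberT, uint32_t>;
    using BindingIndex = TypedInteger<struct BindingIndexT, uint32_t>;

    BindingNumber number{7};  // as written in the BindGroupLayoutDescriptor
    BindingIndex index{0};    // packed position after sorting
    // index = number;                             // does not compile: different tags
    uint32_t raw = static_cast<uint32_t>(number);  // conversion must be explicit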
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
index 6a428d97a40..eb7320c56a2 100644
--- a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
@@ -45,6 +45,11 @@ namespace dawn_native {
return std::move(invalidAllocation);
}
+ // Check the unaligned size to avoid overflowing NextPowerOfTwo.
+ if (allocationSize > mMemoryBlockSize) {
+ return std::move(invalidAllocation);
+ }
+
// Round allocation size to nearest power-of-two.
allocationSize = NextPowerOfTwo(allocationSize);
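The new early-out matters because NextPowerOfTwo rounds up. A sketch of the failure it prevents, assuming 64-bit sizes and a power-of-two mMemoryBlockSize:

    // Any request above 2^63 has no representable power-of-two in uint64_t, so
    // NextPowerOfTwo would overflow. Since mMemoryBlockSize is itself a power of
    // two, rejecting allocationSize > mMemoryBlockSize before rounding guarantees
    // NextPowerOfTwo(allocationSize) cannot wrap.
    uint64_t tooBig = (1ull << 63) + 1;  // NextPowerOfTwo(tooBig) would overflow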
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
index 8a776184e53..3c97655fbaa 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
@@ -18,6 +18,8 @@
#include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/MapRequestTracker.h"
+#include "dawn_native/Queue.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include <cstdio>
@@ -61,10 +63,6 @@ namespace dawn_native {
return {};
}
- MaybeError SetSubDataImpl(uint32_t start, uint32_t count, const void* data) override {
- UNREACHABLE();
- return {};
- }
MaybeError MapReadAsyncImpl(uint32_t serial) override {
UNREACHABLE();
return {};
@@ -73,6 +71,9 @@ namespace dawn_native {
UNREACHABLE();
return {};
}
+ void* GetMappedPointerImpl() override {
+ return mFakeMappedData.get();
+ }
void UnmapImpl() override {
UNREACHABLE();
}
@@ -162,14 +163,22 @@ namespace dawn_native {
ASSERT(!IsError());
ASSERT(mappedPointer != nullptr);
- mState = BufferState::Mapped;
-
+ // Mappable buffers don't use a staging buffer and behave as if they were mapped through MapAsync.
if (IsMapWritable()) {
DAWN_TRY(MapAtCreationImpl(mappedPointer));
+ mState = BufferState::Mapped;
ASSERT(*mappedPointer != nullptr);
return {};
}
+ mState = BufferState::MappedAtCreation;
+
+ // 0-sized buffers are not supposed to be written to, so return any non-null pointer.
+ if (mSize == 0) {
+ *mappedPointer = reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
+ return {};
+ }
+
// If any of these fail, the buffer will be deleted and replaced with an
// error buffer.
// TODO(enga): Suballocate and reuse memory from a larger staging buffer so we don't create
@@ -182,13 +191,14 @@ namespace dawn_native {
return {};
}
- MaybeError BufferBase::ValidateCanUseInSubmitNow() const {
+ MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
ASSERT(!IsError());
switch (mState) {
case BufferState::Destroyed:
return DAWN_VALIDATION_ERROR("Destroyed buffer used in a submit");
case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
return DAWN_VALIDATION_ERROR("Buffer used in a submit while mapped");
case BufferState::Unmapped:
return {};
@@ -200,7 +210,7 @@ namespace dawn_native {
void BufferBase::CallMapReadCallback(uint32_t serial,
WGPUBufferMapAsyncStatus status,
const void* pointer,
- uint32_t dataLength) {
+ uint64_t dataLength) {
ASSERT(!IsError());
if (mMapReadCallback != nullptr && serial == mMapSerial) {
ASSERT(mMapWriteCallback == nullptr);
@@ -221,7 +231,7 @@ namespace dawn_native {
void BufferBase::CallMapWriteCallback(uint32_t serial,
WGPUBufferMapAsyncStatus status,
void* pointer,
- uint32_t dataLength) {
+ uint64_t dataLength) {
ASSERT(!IsError());
if (mMapWriteCallback != nullptr && serial == mMapSerial) {
ASSERT(mMapReadCallback == nullptr);
@@ -240,14 +250,10 @@ namespace dawn_native {
}
void BufferBase::SetSubData(uint32_t start, uint32_t count, const void* data) {
- if (GetDevice()->ConsumedError(ValidateSetSubData(start, count))) {
- return;
- }
- ASSERT(!IsError());
-
- if (GetDevice()->ConsumedError(SetSubDataImpl(start, count, data))) {
- return;
- }
+ Ref<QueueBase> queue = AcquireRef(GetDevice()->GetDefaultQueue());
+ GetDevice()->EmitDeprecationWarning(
+ "Buffer::SetSubData is deprecated, use Queue::WriteBuffer instead");
+ queue->WriteBuffer(this, start, data, count);
}
void BufferBase::MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata) {
@@ -267,24 +273,12 @@ namespace dawn_native {
mState = BufferState::Mapped;
if (GetDevice()->ConsumedError(MapReadAsyncImpl(mMapSerial))) {
+ CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0);
return;
}
- }
-
- MaybeError BufferBase::SetSubDataImpl(uint32_t start, uint32_t count, const void* data) {
- DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(count, GetDevice()->GetPendingCommandSerial()));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
- memcpy(uploadHandle.mappedBuffer, data, count);
-
- DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(
- uploadHandle.stagingBuffer, uploadHandle.startOffset, this, start, count));
-
- return {};
+ MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
+ tracker->Track(this, mMapSerial, false);
}
void BufferBase::MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata) {
@@ -304,8 +298,12 @@ namespace dawn_native {
mState = BufferState::Mapped;
if (GetDevice()->ConsumedError(MapWriteAsyncImpl(mMapSerial))) {
+ CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0);
return;
}
+
+ MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
+ tracker->Track(this, mMapSerial, true);
}
void BufferBase::Destroy() {
@@ -320,16 +318,24 @@ namespace dawn_native {
ASSERT(!IsError());
if (mState == BufferState::Mapped) {
- if (mStagingBuffer == nullptr) {
- Unmap();
+ Unmap();
+ } else if (mState == BufferState::MappedAtCreation) {
+ if (mStagingBuffer != nullptr) {
+ mStagingBuffer.reset();
+ } else {
+ ASSERT(mSize == 0);
}
- mStagingBuffer.reset();
}
+
DestroyInternal();
}
MaybeError BufferBase::CopyFromStagingBuffer() {
ASSERT(mStagingBuffer);
+ if (GetSize() == 0) {
+ return {};
+ }
+
DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetSize()));
DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
@@ -349,9 +355,7 @@ namespace dawn_native {
}
ASSERT(!IsError());
- if (mStagingBuffer != nullptr) {
- GetDevice()->ConsumedError(CopyFromStagingBuffer());
- } else {
+ if (mState == BufferState::Mapped) {
// A map request can only be called once, so this will fire only if the request wasn't
// completed before the Unmap.
// Callbacks are not fired if there is no callback registered, so this is correct for
@@ -359,50 +363,20 @@ namespace dawn_native {
CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
UnmapImpl();
- }
- mState = BufferState::Unmapped;
- mMapReadCallback = nullptr;
- mMapWriteCallback = nullptr;
- mMapUserdata = 0;
- }
-
- MaybeError BufferBase::ValidateSetSubData(uint32_t start, uint32_t count) const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- switch (mState) {
- case BufferState::Mapped:
- return DAWN_VALIDATION_ERROR("Buffer is mapped");
- case BufferState::Destroyed:
- return DAWN_VALIDATION_ERROR("Buffer is destroyed");
- case BufferState::Unmapped:
- break;
- }
-
- if (count > GetSize()) {
- return DAWN_VALIDATION_ERROR("Buffer subdata with too much data");
- }
- // Metal requests buffer to buffer copy size must be a multiple of 4 bytes on macOS
- if (count % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Buffer subdata size must be a multiple of 4 bytes");
- }
-
- // Metal requests offset of buffer to buffer copy must be a multiple of 4 bytes on macOS
- if (start % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Start position must be a multiple of 4 bytes");
- }
-
- // Note that no overflow can happen because we already checked for GetSize() >= count
- if (start > GetSize() - count) {
- return DAWN_VALIDATION_ERROR("Buffer subdata out of range");
- }
+ mMapReadCallback = nullptr;
+ mMapWriteCallback = nullptr;
+ mMapUserdata = 0;
- if (!(mUsage & wgpu::BufferUsage::CopyDst)) {
- return DAWN_VALIDATION_ERROR("Buffer needs the CopyDst usage bit");
+ } else if (mState == BufferState::MappedAtCreation) {
+ if (mStagingBuffer != nullptr) {
+ GetDevice()->ConsumedError(CopyFromStagingBuffer());
+ } else {
+ ASSERT(mSize == 0);
+ }
}
- return {};
+ mState = BufferState::Unmapped;
}
MaybeError BufferBase::ValidateMap(wgpu::BufferUsage requiredUsage,
@@ -415,6 +389,7 @@ namespace dawn_native {
switch (mState) {
case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
return DAWN_VALIDATION_ERROR("Buffer already mapped");
case BufferState::Destroyed:
return DAWN_VALIDATION_ERROR("Buffer is destroyed");
@@ -436,6 +411,7 @@ namespace dawn_native {
switch (mState) {
case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
// A buffer may be in the Mapped state if it was created with CreateBufferMapped
// even if it did not have a mappable usage.
return {};
@@ -467,4 +443,13 @@ namespace dawn_native {
return mState == BufferState::Mapped;
}
+ void BufferBase::OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite) {
+ void* data = GetMappedPointerImpl();
+ if (isWrite) {
+ CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
+ } else {
+ CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
+ }
+ }
+
} // namespace dawn_native
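Pulling the pieces of this file together, the reworked asynchronous map flow looks roughly like this (names from the diff; step ordering inferred from the code):

    // 1. MapReadAsync validates, bumps mMapSerial, and sets mState = Mapped.
    // 2. MapReadAsyncImpl starts the backend work; if it fails, the callback
    //    fires immediately with WGPUBufferMapAsyncStatus_DeviceLost.
    // 3. Otherwise MapRequestTracker::Track(this, mMapSerial, /*isWrite=*/false)
    //    records the request against a pending command serial.
    // 4. When that serial completes, OnMapCommandSerialFinished(mapSerial, false)
    //    calls CallMapReadCallback(..., Success, GetMappedPointerImpl(), GetSize()).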
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.h b/chromium/third_party/dawn/src/dawn_native/Buffer.h
index 1d35ff0aae2..8a1feb755ef 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.h
@@ -35,10 +35,13 @@ namespace dawn_native {
enum class BufferState {
Unmapped,
Mapped,
+ MappedAtCreation,
Destroyed,
};
public:
+ enum class ClearValue { Zero, NonZero };
+
BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
static BufferBase* MakeError(DeviceBase* device);
@@ -50,8 +53,9 @@ namespace dawn_native {
wgpu::BufferUsage GetUsage() const;
MaybeError MapAtCreation(uint8_t** mappedPointer);
+ void OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite);
- MaybeError ValidateCanUseInSubmitNow() const;
+ MaybeError ValidateCanUseOnQueueNow() const;
// Dawn API
void SetSubData(uint32_t start, uint32_t count, const void* data);
@@ -67,11 +71,11 @@ namespace dawn_native {
void CallMapReadCallback(uint32_t serial,
WGPUBufferMapAsyncStatus status,
const void* pointer,
- uint32_t dataLength);
+ uint64_t dataLength);
void CallMapWriteCallback(uint32_t serial,
WGPUBufferMapAsyncStatus status,
void* pointer,
- uint32_t dataLength);
+ uint64_t dataLength);
void DestroyInternal();
@@ -79,16 +83,15 @@ namespace dawn_native {
private:
virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) = 0;
- virtual MaybeError SetSubDataImpl(uint32_t start, uint32_t count, const void* data);
virtual MaybeError MapReadAsyncImpl(uint32_t serial) = 0;
virtual MaybeError MapWriteAsyncImpl(uint32_t serial) = 0;
virtual void UnmapImpl() = 0;
virtual void DestroyImpl() = 0;
+ virtual void* GetMappedPointerImpl() = 0;
virtual bool IsMapWritable() const = 0;
MaybeError CopyFromStagingBuffer();
- MaybeError ValidateSetSubData(uint32_t start, uint32_t count) const;
MaybeError ValidateMap(wgpu::BufferUsage requiredUsage,
WGPUBufferMapAsyncStatus* status) const;
MaybeError ValidateUnmap() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
index 1465ad010b6..cabbc1a6c31 100644
--- a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
@@ -87,6 +87,8 @@ target_sources(dawn_native PRIVATE
"Forward.h"
"Instance.cpp"
"Instance.h"
+ "MapRequestTracker.cpp"
+ "MapRequestTracker.h"
"ObjectBase.cpp"
"ObjectBase.h"
"PassResourceUsage.h"
@@ -100,6 +102,8 @@ target_sources(dawn_native PRIVATE
"PipelineLayout.h"
"ProgrammablePassEncoder.cpp"
"ProgrammablePassEncoder.h"
+ "QuerySet.cpp"
+ "QuerySet.h"
"Queue.cpp"
"Queue.h"
"RenderBundle.cpp"
@@ -191,6 +195,8 @@ if (DAWN_ENABLE_D3D12)
"d3d12/HeapD3D12.h"
"d3d12/NativeSwapChainImplD3D12.cpp"
"d3d12/NativeSwapChainImplD3D12.h"
+ "d3d12/PageableD3D12.cpp"
+ "d3d12/PageableD3D12.h"
"d3d12/PipelineLayoutD3D12.cpp"
"d3d12/PipelineLayoutD3D12.h"
"d3d12/PlatformFunctions.cpp"
@@ -209,6 +215,8 @@ if (DAWN_ENABLE_D3D12)
"d3d12/ResourceHeapAllocationD3D12.h"
"d3d12/SamplerD3D12.cpp"
"d3d12/SamplerD3D12.h"
+ "d3d12/SamplerHeapCacheD3D12.cpp"
+ "d3d12/SamplerHeapCacheD3D12.h"
"d3d12/ShaderModuleD3D12.cpp"
"d3d12/ShaderModuleD3D12.h"
"d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp"
@@ -327,6 +335,8 @@ if (DAWN_ENABLE_OPENGL)
"opengl/PipelineGL.h"
"opengl/PipelineLayoutGL.cpp"
"opengl/PipelineLayoutGL.h"
+ "opengl/QuerySetGL.cpp"
+ "opengl/QuerySetGL.h"
"opengl/QueueGL.cpp"
"opengl/QueueGL.h"
"opengl/RenderPipelineGL.cpp"
@@ -402,6 +412,8 @@ if (DAWN_ENABLE_VULKAN)
"vulkan/UtilsVulkan.h"
"vulkan/VulkanError.cpp"
"vulkan/VulkanError.h"
+ "vulkan/VulkanExtensions.cpp"
+ "vulkan/VulkanExtensions.h"
"vulkan/VulkanFunctions.cpp"
"vulkan/VulkanFunctions.h"
"vulkan/VulkanInfo.cpp"
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
index e02cff1adcf..401451d4911 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
@@ -44,8 +44,8 @@ namespace dawn_native {
const uint32_t mipLevel) {
Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);
- if (extent.depth == copySize.depth && extent.width == copySize.width &&
- extent.height == copySize.height) {
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ if (extent.width == copySize.width && extent.height == copySize.height) {
return true;
}
return false;
@@ -59,11 +59,11 @@ namespace dawn_native {
ASSERT(view->GetLayerCount() == 1);
ASSERT(view->GetLevelCount() == 1);
+ SubresourceRange range = view->GetSubresourceRange();
// If the loadOp is Load, but the subresource is not initialized, use Clear instead.
if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
- !view->GetTexture()->IsSubresourceContentInitialized(
- view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1)) {
+ !view->GetTexture()->IsSubresourceContentInitialized(range)) {
attachmentInfo.loadOp = wgpu::LoadOp::Clear;
attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
}
@@ -73,20 +73,19 @@ namespace dawn_native {
// cleared later in the pipeline. The texture will be resolved from the
// source color attachment, which will be correctly initialized.
TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
+ ASSERT(resolveView->GetLayerCount() == 1);
+ ASSERT(resolveView->GetLevelCount() == 1);
resolveView->GetTexture()->SetIsSubresourceContentInitialized(
- true, resolveView->GetBaseMipLevel(), resolveView->GetLevelCount(),
- resolveView->GetBaseArrayLayer(), resolveView->GetLayerCount());
+ true, resolveView->GetSubresourceRange());
}
switch (attachmentInfo.storeOp) {
case wgpu::StoreOp::Store:
- view->GetTexture()->SetIsSubresourceContentInitialized(
- true, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
+ view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
break;
case wgpu::StoreOp::Clear:
- view->GetTexture()->SetIsSubresourceContentInitialized(
- false, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
+ view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
break;
default:
@@ -98,12 +97,13 @@ namespace dawn_native {
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
auto& attachmentInfo = renderPass->depthStencilAttachment;
TextureViewBase* view = attachmentInfo.view.Get();
+ ASSERT(view->GetLayerCount() == 1);
+ ASSERT(view->GetLevelCount() == 1);
+ SubresourceRange range = view->GetSubresourceRange();
// If the depth stencil texture has not been initialized, we want to use
// LoadOp::Clear to initialize its contents to 0.
- if (!view->GetTexture()->IsSubresourceContentInitialized(
- view->GetBaseMipLevel(), view->GetLevelCount(), view->GetBaseArrayLayer(),
- view->GetLayerCount())) {
+ if (!view->GetTexture()->IsSubresourceContentInitialized(range)) {
if (view->GetTexture()->GetFormat().HasDepth() &&
attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
attachmentInfo.clearDepth = 0.0f;
@@ -125,15 +125,11 @@ namespace dawn_native {
if (attachmentInfo.depthStoreOp == wgpu::StoreOp::Store &&
attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store) {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- true, view->GetBaseMipLevel(), view->GetLevelCount(), view->GetBaseArrayLayer(),
- view->GetLayerCount());
+ view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
} else {
ASSERT(attachmentInfo.depthStoreOp == wgpu::StoreOp::Clear &&
attachmentInfo.stencilStoreOp == wgpu::StoreOp::Clear);
- view->GetTexture()->SetIsSubresourceContentInitialized(
- false, view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount());
+ view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
}
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
index 88197f3c6e6..7c4a327acaa 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
@@ -24,6 +24,21 @@
namespace dawn_native {
+ namespace {
+ bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
+ const std::vector<uint64_t>& pipelineMinimumBufferSizes) {
+ ASSERT(unverifiedBufferSizes.size() == pipelineMinimumBufferSizes.size());
+
+ for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
+ if (unverifiedBufferSizes[i] < pipelineMinimumBufferSizes[i]) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+ } // namespace
+
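A small usage sketch of the helper above, reusing the two-argument span construction seen elsewhere in this diff; the values are illustrative:

    std::vector<uint64_t> pipelineMins = {256, 64};          // from the pipeline layout
    uint64_t actual[] = {512, 64};                           // sizes of the bound buffers
    ityp::span<uint32_t, uint64_t> bound = {actual, 2};
    bool ok = BufferSizesAtLeastAsBig(bound, pipelineMins);  // true: 512 >= 256, 64 >= 64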
enum ValidationAspect {
VALIDATION_ASPECT_PIPELINE,
VALIDATION_ASPECT_BIND_GROUPS,
@@ -85,9 +100,11 @@ namespace dawn_native {
if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
bool matches = true;
- for (uint32_t i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+ for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
if (mBindgroups[i] == nullptr ||
- mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout()) {
+ mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
+ !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+ (*mMinimumBufferSizes)[i])) {
matches = false;
break;
}
@@ -123,7 +140,28 @@ namespace dawn_native {
}
if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
- return DAWN_VALIDATION_ERROR("Missing bind group");
+ for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+ if (mBindgroups[i] == nullptr) {
+ return DAWN_VALIDATION_ERROR("Missing bind group " +
+ std::to_string(static_cast<uint32_t>(i)));
+ } else if (mLastPipelineLayout->GetBindGroupLayout(i) !=
+ mBindgroups[i]->GetLayout()) {
+ return DAWN_VALIDATION_ERROR(
+ "Pipeline and bind group layout doesn't match for bind group " +
+ std::to_string(static_cast<uint32_t>(i)));
+ } else if (!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+ (*mMinimumBufferSizes)[i])) {
+ return DAWN_VALIDATION_ERROR("Binding sizes too small for bind group " +
+ std::to_string(static_cast<uint32_t>(i)));
+ }
+ }
+
+ // The chunk of code above should mirror the checks in |RecomputeLazyAspects|.
+ // It returns the first invalid state found. We shouldn't be able to reach this
+ // line because, for an aspect to be invalid, one of the conditions above must
+ // have failed and returned earlier. If this line is reached, make sure the lazy
+ // aspects and the error checks above are consistent.
+ UNREACHABLE();
+ return DAWN_VALIDATION_ERROR("Bind groups invalid");
}
if (aspects[VALIDATION_ASPECT_PIPELINE]) {
@@ -142,8 +180,9 @@ namespace dawn_native {
SetPipelineCommon(pipeline);
}
- void CommandBufferStateTracker::SetBindGroup(uint32_t index, BindGroupBase* bindgroup) {
+ void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index, BindGroupBase* bindgroup) {
mBindgroups[index] = bindgroup;
+ mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
}
void CommandBufferStateTracker::SetIndexBuffer() {
@@ -156,6 +195,7 @@ namespace dawn_native {
void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
mLastPipelineLayout = pipeline->GetLayout();
+ mMinimumBufferSizes = &pipeline->GetMinimumBufferSizes();
mAspects.set(VALIDATION_ASPECT_PIPELINE);
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
index 8c9c989a95a..39d32fde9d4 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
@@ -16,10 +16,11 @@
#define DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
#include "common/Constants.h"
+#include "common/ityp_array.h"
+#include "dawn_native/BindingInfo.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
-#include <array>
#include <bitset>
#include <map>
#include <set>
@@ -36,7 +37,7 @@ namespace dawn_native {
// State-modifying methods
void SetComputePipeline(ComputePipelineBase* pipeline);
void SetRenderPipeline(RenderPipelineBase* pipeline);
- void SetBindGroup(uint32_t index, BindGroupBase* bindgroup);
+ void SetBindGroup(BindGroupIndex index, BindGroupBase* bindgroup);
void SetIndexBuffer();
void SetVertexBuffer(uint32_t slot);
@@ -52,11 +53,13 @@ namespace dawn_native {
ValidationAspects mAspects;
- std::array<BindGroupBase*, kMaxBindGroups> mBindgroups = {};
+ ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindgroups = {};
std::bitset<kMaxVertexBuffers> mVertexBufferSlotsUsed;
PipelineLayoutBase* mLastPipelineLayout = nullptr;
RenderPipelineBase* mLastRenderPipeline = nullptr;
+
+ const RequiredBufferSizes* mMinimumBufferSizes = nullptr;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
index 31b635b378c..5b8bc640f83 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
@@ -37,6 +37,8 @@ namespace dawn_native {
namespace {
+ // TODO(jiawei.shao@intel.com): add validation for texture-to-texture copies within
+ // the same texture.
MaybeError ValidateCopySizeFitsInTexture(const TextureCopyView& textureCopy,
const Extent3D& copySize) {
const TextureBase* texture = textureCopy.texture;
@@ -44,27 +46,26 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Copy mipLevel out of range");
}
- if (textureCopy.arrayLayer >= texture->GetArrayLayers()) {
- return DAWN_VALIDATION_ERROR("Copy arrayLayer out of range");
- }
-
- Extent3D extent = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
+ Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
+ // For 2D textures, include the array layer as depth so it can be checked with other
+ // dimensions.
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ mipSize.depth = texture->GetArrayLayers();
// All texture dimensions fit in uint32_t, so doing the checks in uint64_t
// avoids overflows.
- if (uint64_t(textureCopy.origin.x) + uint64_t(copySize.width) >
- static_cast<uint64_t>(extent.width) ||
- uint64_t(textureCopy.origin.y) + uint64_t(copySize.height) >
- static_cast<uint64_t>(extent.height)) {
+ if (static_cast<uint64_t>(textureCopy.origin.x) +
+ static_cast<uint64_t>(copySize.width) >
+ static_cast<uint64_t>(mipSize.width) ||
+ static_cast<uint64_t>(textureCopy.origin.y) +
+ static_cast<uint64_t>(copySize.height) >
+ static_cast<uint64_t>(mipSize.height) ||
+ static_cast<uint64_t>(textureCopy.origin.z) +
+ static_cast<uint64_t>(copySize.depth) >
+ static_cast<uint64_t>(mipSize.depth)) {
return DAWN_VALIDATION_ERROR("Copy would touch outside of the texture");
}
- // TODO(cwallez@chromium.org): Check the depth bound differently for 2D arrays and 3D
- // textures
- if (textureCopy.origin.z != 0 || copySize.depth > 1) {
- return DAWN_VALIDATION_ERROR("No support for z != 0 and depth > 1 for now");
- }
-
return {};
}
@@ -102,20 +103,6 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateB2BCopyWithinSameBuffer(uint64_t dataSize,
- uint64_t srcOffset,
- uint64_t dstOffset) {
- uint64_t maxOffset = std::max(srcOffset, dstOffset);
- uint64_t minOffset = std::min(srcOffset, dstOffset);
-
- if (minOffset + dataSize > maxOffset) {
- return DAWN_VALIDATION_ERROR(
- "Copy regions cannot overlap when copy within the same buffer");
- }
-
- return {};
- }
-
MaybeError ValidateTexelBufferOffset(const BufferCopyView& bufferCopy,
const Format& format) {
if (bufferCopy.offset % format.blockByteSize != 0) {
@@ -154,9 +141,10 @@ namespace dawn_native {
const Extent3D& copySize) {
Extent3D srcSize = src.texture->GetSize();
- if (dst.origin.x != 0 || dst.origin.y != 0 || dst.origin.z != 0 ||
- srcSize.width != copySize.width || srcSize.height != copySize.height ||
- srcSize.depth != copySize.depth) {
+ ASSERT(src.texture->GetDimension() == wgpu::TextureDimension::e2D &&
+ dst.texture->GetDimension() == wgpu::TextureDimension::e2D);
+ if (dst.origin.x != 0 || dst.origin.y != 0 || srcSize.width != copySize.width ||
+ srcSize.height != copySize.height) {
return DAWN_VALIDATION_ERROR(
"The entire subresource must be copied when using a depth/stencil texture or "
"when samples are greater than 1.");
@@ -191,6 +179,16 @@ namespace dawn_native {
DAWN_TRY(ValidateEntireSubresourceCopied(src, dst, copySize));
}
+ if (src.texture == dst.texture && src.mipLevel == dst.mipLevel) {
+ ASSERT(src.texture->GetDimension() == wgpu::TextureDimension::e2D &&
+ dst.texture->GetDimension() == wgpu::TextureDimension::e2D);
+ if (IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depth)) {
+ return DAWN_VALIDATION_ERROR(
+ "Copy subresources cannot be overlapped when copying within the same "
+ "texture.");
+ }
+ }
+
return {};
}
@@ -200,23 +198,28 @@ namespace dawn_native {
uint32_t rowsPerImage,
uint32_t* bufferSize) {
ASSERT(rowsPerImage >= copySize.height);
+ if (copySize.width == 0 || copySize.height == 0 || copySize.depth == 0) {
+ *bufferSize = 0;
+ return {};
+ }
+
uint32_t blockByteSize = textureFormat.blockByteSize;
uint32_t blockWidth = textureFormat.blockWidth;
uint32_t blockHeight = textureFormat.blockHeight;
// TODO(cwallez@chromium.org): check for overflows
uint32_t slicePitch = bytesPerRow * rowsPerImage / blockWidth;
+
+ ASSERT(copySize.height >= 1);
uint32_t sliceSize = bytesPerRow * (copySize.height / blockHeight - 1) +
(copySize.width / blockWidth) * blockByteSize;
+
+ ASSERT(copySize.depth >= 1);
*bufferSize = (slicePitch * (copySize.depth - 1)) + sliceSize;
return {};
}
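A worked example of the size computation, assuming an uncompressed RGBA8 format (blockByteSize = 4, blockWidth = blockHeight = 1):

    // copySize = {16, 16, 2}, bytesPerRow = 256, rowsPerImage = 16
    // slicePitch = 256 * 16 / 1             = 4096
    // sliceSize  = 256 * (16 - 1) + 16 * 4  = 3904
    // bufferSize = 4096 * (2 - 1) + 3904    = 8000 bytes
    // The final image only needs sliceSize bytes, not a full slicePitch.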
- uint32_t ComputeDefaultBytesPerRow(const Format& format, uint32_t width) {
- return width / format.blockWidth * format.blockByteSize;
- }
-
MaybeError ValidateBytesPerRow(const Format& format,
const Extent3D& copySize,
uint32_t bytesPerRow) {
@@ -499,6 +502,25 @@ namespace dawn_native {
return {};
}
+ ResultOrError<TextureCopyView> FixTextureCopyView(DeviceBase* device,
+ const TextureCopyView* view) {
+ TextureCopyView fixedView = *view;
+
+ if (view->arrayLayer != 0) {
+ if (view->origin.z != 0) {
+ return DAWN_VALIDATION_ERROR("arrayLayer and origin.z cannot both be != 0");
+ } else {
+ fixedView.origin.z = fixedView.arrayLayer;
+ fixedView.arrayLayer = 1;
+ device->EmitDeprecationWarning(
+ "wgpu::TextureCopyView::arrayLayer is deprecated in favor of "
+ "::origin::z");
+ }
+ }
+
+ return fixedView;
+ }
+
} // namespace
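The shim's effect on a deprecated copy view, in a short sketch (fields as used in this diff):

    TextureCopyView view = {};
    view.arrayLayer = 3;  // deprecated field
    view.origin = {0, 0, 0};
    // FixTextureCopyView(device, &view) returns a copy with origin.z == 3 and
    // arrayLayer == 1, plus a deprecation warning. Setting both arrayLayer and
    // origin.z to non-zero values is rejected as a validation error.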
CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor*)
@@ -624,15 +646,15 @@ namespace dawn_native {
DAWN_TRY(GetDevice()->ValidateObject(source));
DAWN_TRY(GetDevice()->ValidateObject(destination));
+ if (source == destination) {
+ return DAWN_VALIDATION_ERROR(
+ "Source and destination cannot be the same buffer.");
+ }
+
DAWN_TRY(ValidateCopySizeFitsInBuffer(source, sourceOffset, size));
DAWN_TRY(ValidateCopySizeFitsInBuffer(destination, destinationOffset, size));
DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
- if (source == destination) {
- DAWN_TRY(
- ValidateB2BCopyWithinSameBuffer(size, sourceOffset, destinationOffset));
- }
-
DAWN_TRY(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst));
@@ -640,13 +662,16 @@ namespace dawn_native {
mTopLevelBuffers.insert(destination);
}
- CopyBufferToBufferCmd* copy =
- allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
- copy->source = source;
- copy->sourceOffset = sourceOffset;
- copy->destination = destination;
- copy->destinationOffset = destinationOffset;
- copy->size = size;
+ // Skip no-op copies; some backends' validation rules disallow them.
+ if (size != 0) {
+ CopyBufferToBufferCmd* copy =
+ allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
+ copy->source = source;
+ copy->sourceOffset = sourceOffset;
+ copy->destination = destination;
+ copy->destinationOffset = destinationOffset;
+ copy->size = size;
+ }
return {};
});
@@ -656,19 +681,19 @@ namespace dawn_native {
const TextureCopyView* destination,
const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ // TODO(dawn:22): Remove once migration from GPUTextureCopyView.arrayLayer to
+ // GPUTextureCopyView.origin.z is done.
+ TextureCopyView fixedDest;
+ DAWN_TRY_ASSIGN(fixedDest, FixTextureCopyView(GetDevice(), destination));
+ destination = &fixedDest;
+
// Validate objects before doing the defaulting.
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(source->buffer));
DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
}
- // Compute default values for bytesPerRow/rowsPerImage
- uint32_t defaultedBytesPerRow = source->bytesPerRow;
- if (defaultedBytesPerRow == 0) {
- defaultedBytesPerRow =
- ComputeDefaultBytesPerRow(destination->texture->GetFormat(), copySize->width);
- }
-
+ // Compute default value for rowsPerImage
uint32_t defaultedRowsPerImage = source->rowsPerImage;
if (defaultedRowsPerImage == 0) {
defaultedRowsPerImage = copySize->height;
@@ -686,10 +711,10 @@ namespace dawn_native {
uint32_t bufferCopySize = 0;
DAWN_TRY(ValidateBytesPerRow(destination->texture->GetFormat(), *copySize,
- defaultedBytesPerRow));
+ source->bytesPerRow));
DAWN_TRY(ComputeTextureCopyBufferSize(destination->texture->GetFormat(), *copySize,
- defaultedBytesPerRow, defaultedRowsPerImage,
+ source->bytesPerRow, defaultedRowsPerImage,
&bufferCopySize));
DAWN_TRY(ValidateCopySizeFitsInTexture(*destination, *copySize));
@@ -708,7 +733,7 @@ namespace dawn_native {
allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
copy->source.buffer = source->buffer;
copy->source.offset = source->offset;
- copy->source.bytesPerRow = defaultedBytesPerRow;
+ copy->source.bytesPerRow = source->bytesPerRow;
copy->source.rowsPerImage = defaultedRowsPerImage;
copy->destination.texture = destination->texture;
copy->destination.origin = destination->origin;
@@ -716,6 +741,10 @@ namespace dawn_native {
copy->destination.mipLevel = destination->mipLevel;
copy->destination.arrayLayer = destination->arrayLayer;
+ // TODO(cwallez@chromium.org): Make backends use origin.z instead of arrayLayer
+ copy->destination.arrayLayer = copy->destination.origin.z;
+ copy->destination.origin.z = 0;
+
return {};
});
}
@@ -724,19 +753,19 @@ namespace dawn_native {
const BufferCopyView* destination,
const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ // TODO(dawn:22): Remove once migration from GPUTextureCopyView.arrayLayer to
+ // GPUTextureCopyView.origin.z is done.
+ TextureCopyView fixedSrc;
+ DAWN_TRY_ASSIGN(fixedSrc, FixTextureCopyView(GetDevice(), source));
+ source = &fixedSrc;
+
// Validate objects before doing the defaulting.
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(source->texture));
DAWN_TRY(GetDevice()->ValidateObject(destination->buffer));
}
- // Compute default values for bytesPerRow/rowsPerImage
- uint32_t defaultedBytesPerRow = destination->bytesPerRow;
- if (defaultedBytesPerRow == 0) {
- defaultedBytesPerRow =
- ComputeDefaultBytesPerRow(source->texture->GetFormat(), copySize->width);
- }
-
+ // Compute default value for rowsPerImage
uint32_t defaultedRowsPerImage = destination->rowsPerImage;
if (defaultedRowsPerImage == 0) {
defaultedRowsPerImage = copySize->height;
@@ -753,10 +782,10 @@ namespace dawn_native {
uint32_t bufferCopySize = 0;
DAWN_TRY(ValidateBytesPerRow(source->texture->GetFormat(), *copySize,
- defaultedBytesPerRow));
+ destination->bytesPerRow));
DAWN_TRY(ComputeTextureCopyBufferSize(source->texture->GetFormat(), *copySize,
- defaultedBytesPerRow, defaultedRowsPerImage,
- &bufferCopySize));
+ destination->bytesPerRow,
+ defaultedRowsPerImage, &bufferCopySize));
DAWN_TRY(ValidateCopySizeFitsInTexture(*source, *copySize));
DAWN_TRY(ValidateCopySizeFitsInBuffer(*destination, bufferCopySize));
@@ -779,9 +808,13 @@ namespace dawn_native {
copy->source.arrayLayer = source->arrayLayer;
copy->destination.buffer = destination->buffer;
copy->destination.offset = destination->offset;
- copy->destination.bytesPerRow = defaultedBytesPerRow;
+ copy->destination.bytesPerRow = destination->bytesPerRow;
copy->destination.rowsPerImage = defaultedRowsPerImage;
+ // TODO(cwallez@chromium.org): Make backends use origin.z instead of arrayLayer
+ copy->source.arrayLayer = copy->source.origin.z;
+ copy->source.origin.z = 0;
+
return {};
});
}
@@ -790,6 +823,15 @@ namespace dawn_native {
const TextureCopyView* destination,
const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ // TODO(dawn:22): Remove once migration from GPUTextureCopyView.arrayLayer to
+ // GPUTextureCopyView.origin.z is done.
+ TextureCopyView fixedSrc;
+ DAWN_TRY_ASSIGN(fixedSrc, FixTextureCopyView(GetDevice(), source));
+ source = &fixedSrc;
+ TextureCopyView fixedDest;
+ DAWN_TRY_ASSIGN(fixedDest, FixTextureCopyView(GetDevice(), destination));
+ destination = &fixedDest;
+
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(source->texture));
DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
@@ -825,6 +867,12 @@ namespace dawn_native {
copy->destination.arrayLayer = destination->arrayLayer;
copy->copySize = *copySize;
+ // TODO(cwallez@chromium.org): Make backends use origin.z instead of arrayLayer
+ copy->source.arrayLayer = copy->source.origin.z;
+ copy->source.origin.z = 0;
+ copy->destination.arrayLayer = copy->destination.origin.z;
+ copy->destination.origin.z = 0;
+
return {};
});
}
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
index 55b6cec0afc..7f8da9bcdf2 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
@@ -339,4 +339,11 @@ namespace dawn_native {
return {};
}
+ bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
+ uint32_t maxStart = std::max(startA, startB);
+ uint32_t minStart = std::min(startA, startB);
+ return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
+ static_cast<uint64_t>(maxStart);
+ }
+
} // namespace dawn_native
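Two worked cases for the overlap test, where each range is the half-open interval [start, start + length):

    // IsRangeOverlapped(0, 2, 3) -> true   min(0,2) + 3 = 3 > max(0,2) = 2
    // IsRangeOverlapped(0, 3, 3) -> false  [0,3) and [3,6) only touch at 3
    // The uint64_t casts keep minStart + length from wrapping near UINT32_MAX.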
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
index d649ce32eeb..53871ccdc5e 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
@@ -36,6 +36,8 @@ namespace dawn_native {
MaybeError ValidatePassResourceUsage(const PassResourceUsage& usage);
+ bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
+
} // namespace dawn_native
#endif // DAWNNATIVE_COMMANDVALIDATION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.h b/chromium/third_party/dawn/src/dawn_native/Commands.h
index 6ccd2f39c37..155a21d83be 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.h
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.h
@@ -18,6 +18,7 @@
#include "common/Constants.h"
#include "dawn_native/AttachmentState.h"
+#include "dawn_native/BindingInfo.h"
#include "dawn_native/Texture.h"
#include "dawn_native/dawn_platform.h"
@@ -210,7 +211,7 @@ namespace dawn_native {
};
struct SetBindGroupCmd {
- uint32_t index;
+ BindGroupIndex index;
Ref<BindGroupBase> group;
uint32_t dynamicOffsetCount;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
index c1394c68f9d..ee49b1151d1 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
@@ -19,6 +19,13 @@
namespace dawn_native {
+ namespace {
+ RequiredBufferSizes ComputeMinBufferSizes(const ComputePipelineDescriptor* descriptor) {
+ return descriptor->computeStage.module->ComputeRequiredBufferSizesForLayout(
+ descriptor->layout);
+ }
+ } // anonymous namespace
+
MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
const ComputePipelineDescriptor* descriptor) {
if (descriptor->nextInChain != nullptr) {
@@ -38,7 +45,10 @@ namespace dawn_native {
ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
const ComputePipelineDescriptor* descriptor)
- : PipelineBase(device, descriptor->layout, wgpu::ShaderStage::Compute),
+ : PipelineBase(device,
+ descriptor->layout,
+ wgpu::ShaderStage::Compute,
+ ComputeMinBufferSizes(descriptor)),
mModule(descriptor->computeStage.module),
mEntryPoint(descriptor->computeStage.entryPoint) {
}
diff --git a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
index 14ae72a407c..32061152bdf 100644
--- a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
@@ -178,8 +178,8 @@ namespace dawn_native {
uint32_t layerCount) {
dawn_native::TextureBase* textureBase =
reinterpret_cast<dawn_native::TextureBase*>(texture);
- return textureBase->IsSubresourceContentInitialized(baseMipLevel, levelCount,
- baseArrayLayer, layerCount);
+ SubresourceRange range = {baseMipLevel, levelCount, baseArrayLayer, layerCount};
+ return textureBase->IsSubresourceContentInitialized(range);
}
std::vector<const char*> GetProcMapNamesForTestingInternal();
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.cpp b/chromium/third_party/dawn/src/dawn_native/Device.cpp
index 4c90c6f5fbc..1c30cd509c8 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Device.cpp
@@ -30,7 +30,9 @@
#include "dawn_native/Fence.h"
#include "dawn_native/FenceSignalTracker.h"
#include "dawn_native/Instance.h"
+#include "dawn_native/MapRequestTracker.h"
#include "dawn_native/PipelineLayout.h"
+#include "dawn_native/QuerySet.h"
#include "dawn_native/Queue.h"
#include "dawn_native/RenderBundleEncoder.h"
#include "dawn_native/RenderPipeline.h"
@@ -102,6 +104,7 @@ namespace dawn_native {
mCaches = std::make_unique<DeviceBase::Caches>();
mErrorScopeTracker = std::make_unique<ErrorScopeTracker>(this);
mFenceSignalTracker = std::make_unique<FenceSignalTracker>(this);
+ mMapRequestTracker = std::make_unique<MapRequestTracker>(this);
mDynamicUploader = std::make_unique<DynamicUploader>(this);
mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
@@ -122,9 +125,9 @@ namespace dawn_native {
case State::Alive:
// Alive is the only state which can have GPU work happening. Wait for all of it to
// complete before proceeding with destruction.
- // Assert that errors are device loss so that we can continue with destruction
- AssertAndIgnoreDeviceLossError(WaitForIdleForDestruction());
- ASSERT(mCompletedSerial == mLastSubmittedSerial);
+ // Ignore errors so that we can continue with destruction
+ IgnoreErrors(WaitForIdleForDestruction());
+ AssumeCommandsComplete();
break;
case State::BeingDisconnected:
@@ -137,6 +140,8 @@ namespace dawn_native {
case State::Disconnected:
break;
}
+ ASSERT(mCompletedSerial == mLastSubmittedSerial);
+ ASSERT(mFutureCallbackSerial <= mCompletedSerial);
// Skip handling device facilities if they haven't even been created (or failed doing so)
if (mState != State::BeingCreated) {
@@ -146,16 +151,25 @@ namespace dawn_native {
// pending callbacks.
mErrorScopeTracker->Tick(GetCompletedCommandSerial());
mFenceSignalTracker->Tick(GetCompletedCommandSerial());
+ mMapRequestTracker->Tick(GetCompletedCommandSerial());
+ // Call TickImpl one last time to clean up resources
+ // Ignore errors so that we can continue with destruction
+ IgnoreErrors(TickImpl());
}
// At this point GPU operations are always finished, so we are in the disconnected state.
mState = State::Disconnected;
+ // mCurrentErrorScope can be null if we failed device initialization.
+ if (mCurrentErrorScope.Get() != nullptr) {
+ mCurrentErrorScope->UnlinkForShutdown();
+ }
mErrorScopeTracker = nullptr;
- mCurrentErrorScope->UnlinkForShutdown();
mFenceSignalTracker = nullptr;
mDynamicUploader = nullptr;
+ mMapRequestTracker = nullptr;
+ AssumeCommandsComplete();
// Tell the backend that it can free all the objects now that the GPU timeline is empty.
ShutDownImpl();
@@ -173,9 +187,12 @@ namespace dawn_native {
// threads in a multithreaded scenario?
mState = State::BeingDisconnected;
- // Assert that errors are device losses so that we can continue with destruction.
- AssertAndIgnoreDeviceLossError(WaitForIdleForDestruction());
- ASSERT(mCompletedSerial == mLastSubmittedSerial);
+ // Ignore errors so that we can continue with destruction
+ // Assume all commands are complete after WaitForIdleForDestruction (because they were)
+ IgnoreErrors(WaitForIdleForDestruction());
+ IgnoreErrors(TickImpl());
+ AssumeCommandsComplete();
+ ASSERT(mFutureCallbackSerial <= mCompletedSerial);
mState = State::Disconnected;
// Now everything is as if the device was lost.
@@ -301,6 +318,10 @@ namespace dawn_native {
return mFenceSignalTracker.get();
}
+ MapRequestTracker* DeviceBase::GetMapRequestTracker() const {
+ return mMapRequestTracker.get();
+ }
+
Serial DeviceBase::GetCompletedCommandSerial() const {
return mCompletedSerial;
}
@@ -309,24 +330,30 @@ namespace dawn_native {
return mLastSubmittedSerial;
}
- void DeviceBase::IncrementLastSubmittedCommandSerial() {
- mLastSubmittedSerial++;
+ Serial DeviceBase::GetFutureCallbackSerial() const {
+ return mFutureCallbackSerial;
}
- void DeviceBase::ArtificiallyIncrementSerials() {
- mCompletedSerial++;
+ void DeviceBase::IncrementLastSubmittedCommandSerial() {
mLastSubmittedSerial++;
}
void DeviceBase::AssumeCommandsComplete() {
- mLastSubmittedSerial++;
- mCompletedSerial = mLastSubmittedSerial;
+ Serial maxSerial = std::max(mLastSubmittedSerial + 1, mFutureCallbackSerial);
+ mLastSubmittedSerial = maxSerial;
+ mCompletedSerial = maxSerial;
}
Serial DeviceBase::GetPendingCommandSerial() const {
return mLastSubmittedSerial + 1;
}
+ void DeviceBase::AddFutureCallbackSerial(Serial serial) {
+ if (serial > mFutureCallbackSerial) {
+ mFutureCallbackSerial = serial;
+ }
+ }
+
void DeviceBase::CheckPassedSerials() {
Serial completedSerial = CheckAndUpdateCompletedSerials();
@@ -621,6 +648,15 @@ namespace dawn_native {
return result;
}
+ QuerySetBase* DeviceBase::CreateQuerySet(const QuerySetDescriptor* descriptor) {
+ QuerySetBase* result = nullptr;
+
+ if (ConsumedError(CreateQuerySetInternal(&result, descriptor))) {
+ return QuerySetBase::MakeError(this);
+ }
+
+ return result;
+ }
QueueBase* DeviceBase::CreateQueue() {
// TODO(dawn:22): Remove this once users use GetDefaultQueue
EmitDeprecationWarning(
@@ -695,22 +731,44 @@ namespace dawn_native {
return result;
}
+ // For Dawn Wire
+
+ BufferBase* DeviceBase::CreateErrorBuffer() {
+ return BufferBase::MakeError(this);
+ }
+
// Other Device API methods
void DeviceBase::Tick() {
if (ConsumedError(ValidateIsAlive())) {
return;
}
- if (ConsumedError(TickImpl())) {
- return;
- }
+ // To avoid ticking too often, we only want to tick when:
+ // 1. the last submitted serial has moved beyond the completed serial, or
+ // 2. the completed serial has not reached the future serial set by the trackers
+ if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureCallbackSerial) {
+ CheckPassedSerials();
+
+ if (ConsumedError(TickImpl())) {
+ return;
+ }
+
+ // If there is no GPU work in flight, we need to move the serials forward so that
+ // CPU operations waiting on GPU completion know they don't have to wait.
+ // AssumeCommandsComplete will assign the max serial we must tick to in order to
+ // fire the awaiting callbacks.
+ if (mCompletedSerial == mLastSubmittedSerial) {
+ AssumeCommandsComplete();
+ }
- // TODO(cwallez@chromium.org): decouple TickImpl from updating the serial so that we can
- // tick the dynamic uploader before the backend resource allocators. This would allow
- // reclaiming resources one tick earlier.
- mDynamicUploader->Deallocate(GetCompletedCommandSerial());
- mErrorScopeTracker->Tick(GetCompletedCommandSerial());
- mFenceSignalTracker->Tick(GetCompletedCommandSerial());
+ // TODO(cwallez@chromium.org): decouple TickImpl from updating the serial so that we can
+ // tick the dynamic uploader before the backend resource allocators. This would allow
+ // reclaiming resources one tick earlier.
+ mDynamicUploader->Deallocate(mCompletedSerial);
+ mErrorScopeTracker->Tick(mCompletedSerial);
+ mFenceSignalTracker->Tick(mCompletedSerial);
+ mMapRequestTracker->Tick(mCompletedSerial);
+ }
}
void DeviceBase::Reference() {
@@ -842,6 +900,16 @@ namespace dawn_native {
return {};
}
+ MaybeError DeviceBase::CreateQuerySetInternal(QuerySetBase** result,
+ const QuerySetDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateQuerySetDescriptor(this, descriptor));
+ }
+ DAWN_TRY_ASSIGN(*result, CreateQuerySetImpl(descriptor));
+ return {};
+ }
+
MaybeError DeviceBase::CreateRenderBundleEncoderInternal(
RenderBundleEncoder** result,
const RenderBundleEncoderDescriptor* descriptor) {
@@ -939,6 +1007,13 @@ namespace dawn_native {
ResultOrError<Ref<TextureBase>> DeviceBase::CreateTextureInternal(
const TextureDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
+
+ // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
+ // GPUTextureDescriptor.size.depth is done.
+ TextureDescriptor fixedDescriptor;
+ DAWN_TRY_ASSIGN(fixedDescriptor, FixTextureDescriptor(this, descriptor));
+ descriptor = &fixedDescriptor;
+
if (IsValidationEnabled()) {
DAWN_TRY(ValidateTextureDescriptor(this, descriptor));
}
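
The Device.cpp changes revolve around three monotonic serials. A stripped-down model of the arithmetic (not Dawn's code) shows why AssumeCommandsComplete takes the max of mLastSubmittedSerial + 1 and mFutureCallbackSerial: trackers may have enqueued callbacks at the pending serial, and faking completion below that value would strand them.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    using Serial = uint64_t;

    struct SerialModel {
        Serial completed = 0;       // last serial the GPU fence reported finished
        Serial lastSubmitted = 0;   // last serial handed to the GPU
        Serial futureCallback = 0;  // largest serial any tracker needs ticked

        void Submit() { lastSubmitted++; }

        // Mirrors AddFutureCallbackSerial(GetPendingCommandSerial()).
        void TrackCallback() { futureCallback = std::max(futureCallback, lastSubmitted + 1); }

        // Fake completion far enough that every enqueued callback can fire.
        void AssumeCommandsComplete() {
            Serial maxSerial = std::max(lastSubmitted + 1, futureCallback);
            lastSubmitted = maxSerial;
            completed = maxSerial;
            assert(completed == lastSubmitted && futureCallback <= completed);
        }
    };
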
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.h b/chromium/third_party/dawn/src/dawn_native/Device.h
index 0a270a54cf8..94d916f7c2b 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.h
+++ b/chromium/third_party/dawn/src/dawn_native/Device.h
@@ -37,6 +37,7 @@ namespace dawn_native {
class ErrorScope;
class ErrorScopeTracker;
class FenceSignalTracker;
+ class MapRequestTracker;
class StagingBufferBase;
class DeviceBase {
@@ -71,6 +72,7 @@ namespace dawn_native {
ErrorScopeTracker* GetErrorScopeTracker() const;
FenceSignalTracker* GetFenceSignalTracker() const;
+ MapRequestTracker* GetMapRequestTracker() const;
// Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
// isn't a valid wgpu::TextureFormat or isn't supported by this device.
@@ -88,6 +90,7 @@ namespace dawn_native {
Serial GetCompletedCommandSerial() const;
Serial GetLastSubmittedCommandSerial() const;
+ Serial GetFutureCallbackSerial() const;
Serial GetPendingCommandSerial() const;
virtual MaybeError TickImpl() = 0;
@@ -143,6 +146,7 @@ namespace dawn_native {
CommandEncoder* CreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
ComputePipelineBase* CreateComputePipeline(const ComputePipelineDescriptor* descriptor);
PipelineLayoutBase* CreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
+ QuerySetBase* CreateQuerySet(const QuerySetDescriptor* descriptor);
QueueBase* CreateQueue();
RenderBundleEncoder* CreateRenderBundleEncoder(
const RenderBundleEncoderDescriptor* descriptor);
@@ -154,6 +158,9 @@ namespace dawn_native {
TextureViewBase* CreateTextureView(TextureBase* texture,
const TextureViewDescriptor* descriptor);
+ // For Dawn Wire
+ BufferBase* CreateErrorBuffer();
+
QueueBase* GetDefaultQueue();
void InjectError(wgpu::ErrorType type, const char* message);
@@ -212,6 +219,7 @@ namespace dawn_native {
size_t GetDeprecationWarningCountForTesting();
void EmitDeprecationWarning(const char* warning);
void LoseForTesting();
+ void AddFutureCallbackSerial(Serial serial);
protected:
void SetToggle(Toggle toggle, bool isEnabled);
@@ -222,13 +230,6 @@ namespace dawn_native {
// Increment mLastSubmittedSerial when we submit the next serial
void IncrementLastSubmittedCommandSerial();
- // If there's no GPU work in flight we still need to artificially increment the serial
- // so that CPU operations waiting on GPU completion can know they don't have to wait.
- void ArtificiallyIncrementSerials();
- // During shut down of device, some operations might have been started since the last submit
- // and waiting on a serial that doesn't have a corresponding fence enqueued. Fake serials to
- // make all commands look completed.
- void AssumeCommandsComplete();
// Check for passed fences and set the new completed serial
void CheckPassedSerials();
@@ -242,6 +243,8 @@ namespace dawn_native {
const ComputePipelineDescriptor* descriptor) = 0;
virtual ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) = 0;
+ virtual ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) = 0;
virtual ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) = 0;
virtual ResultOrError<SamplerBase*> CreateSamplerImpl(
@@ -270,6 +273,8 @@ namespace dawn_native {
const ComputePipelineDescriptor* descriptor);
MaybeError CreatePipelineLayoutInternal(PipelineLayoutBase** result,
const PipelineLayoutDescriptor* descriptor);
+ MaybeError CreateQuerySetInternal(QuerySetBase** result,
+ const QuerySetDescriptor* descriptor);
MaybeError CreateRenderBundleEncoderInternal(
RenderBundleEncoder** result,
const RenderBundleEncoderDescriptor* descriptor);
@@ -296,14 +301,21 @@ namespace dawn_native {
// Each backend should implement to check their passed fences if there are any and return a
// completed serial. Return 0 should indicate no fences to check.
virtual Serial CheckAndUpdateCompletedSerials() = 0;
+ // During shutdown of the device, some operations might have been started since the last
+ // submit and may be waiting on a serial that doesn't have a corresponding fence enqueued.
+ // Fake the serials to make all commands look completed.
+ void AssumeCommandsComplete();
// mCompletedSerial tracks the last completed command serial that the fence has returned.
// mLastSubmittedSerial tracks the last submitted command serial.
// During device removal, the serials could be artificially incremented
// to make it appear as if commands have been completed. They can also be artificially
// incremented when no work is being done in the GPU so CPU operations don't have to wait on
// stale serials.
+ // mFutureCallbackSerial tracks the largest serial we need to tick to for the callbacks to
+ // fire.
Serial mCompletedSerial = 0;
Serial mLastSubmittedSerial = 0;
+ Serial mFutureCallbackSerial = 0;
// ShutDownImpl is used to clean up and release resources used by device, does not wait for
// GPU or check errors.
@@ -331,6 +343,7 @@ namespace dawn_native {
std::unique_ptr<DynamicUploader> mDynamicUploader;
std::unique_ptr<ErrorScopeTracker> mErrorScopeTracker;
std::unique_ptr<FenceSignalTracker> mFenceSignalTracker;
+ std::unique_ptr<MapRequestTracker> mMapRequestTracker;
Ref<QueueBase> mDefaultQueue;
struct DeprecationWarnings;
diff --git a/chromium/third_party/dawn/src/dawn_native/Error.cpp b/chromium/third_party/dawn/src/dawn_native/Error.cpp
index 13db32ecf7c..6dcc3f86c13 100644
--- a/chromium/third_party/dawn/src/dawn_native/Error.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Error.cpp
@@ -19,10 +19,14 @@
namespace dawn_native {
- void AssertAndIgnoreDeviceLossError(MaybeError maybeError) {
+ void IgnoreErrors(MaybeError maybeError) {
if (maybeError.IsError()) {
std::unique_ptr<ErrorData> errorData = maybeError.AcquireError();
- ASSERT(errorData->GetType() == InternalErrorType::DeviceLost);
+ // During shutdown and destruction, device lost errors can be ignored.
+ // We can also ignore other unexpected internal errors on shutdown and treat them as
+ // device loss so that we can continue with destruction.
+ ASSERT(errorData->GetType() == InternalErrorType::DeviceLost ||
+ errorData->GetType() == InternalErrorType::Internal);
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Error.h b/chromium/third_party/dawn/src/dawn_native/Error.h
index 41a1eff2267..3d5d5c38762 100644
--- a/chromium/third_party/dawn/src/dawn_native/Error.h
+++ b/chromium/third_party/dawn/src/dawn_native/Error.h
@@ -114,7 +114,7 @@ namespace dawn_native {
break
- // Assert that errors are device loss so that we can continue with destruction
- void AssertAndIgnoreDeviceLossError(MaybeError maybeError);
+ // Ignore errors so that we can continue with destruction
+ void IgnoreErrors(MaybeError maybeError);
wgpu::ErrorType ToWGPUErrorType(InternalErrorType type);
InternalErrorType FromWGPUErrorType(wgpu::ErrorType type);
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.cpp
index 409b36b6969..b110e97b003 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.cpp
@@ -37,6 +37,7 @@ namespace dawn_native {
void ErrorScopeTracker::TrackUntilLastSubmitComplete(ErrorScope* scope) {
mScopesInFlight.Enqueue(scope, mDevice->GetLastSubmittedCommandSerial());
+ mDevice->AddFutureCallbackSerial(mDevice->GetPendingCommandSerial());
}
void ErrorScopeTracker::Tick(Serial completedSerial) {
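
ErrorScopeTracker, FenceSignalTracker, and the new MapRequestTracker all follow the same pattern: enqueue a payload against a serial, then call AddFutureCallbackSerial so Device::Tick keeps advancing serials until the callback can fire even if nothing else is ever submitted. Distilled into one generic shape (a sketch; Fire() is a placeholder for the tracker-specific callback):

    template <typename T>
    class SerialTrackerSketch {
      public:
        void Track(DeviceBase* device, T payload) {
            mInFlight.Enqueue(std::move(payload), device->GetLastSubmittedCommandSerial());
            // Without this, Tick() could stop before reaching the enqueued serial.
            device->AddFutureCallbackSerial(device->GetPendingCommandSerial());
        }

        void Tick(Serial completedSerial) {
            for (T& payload : mInFlight.IterateUpTo(completedSerial)) {
                payload.Fire();  // placeholder for the payload-specific callback
            }
            mInFlight.ClearUpTo(completedSerial);
        }

      private:
        SerialQueue<T> mInFlight;
    };
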
diff --git a/chromium/third_party/dawn/src/dawn_native/Extensions.cpp b/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
index a2b5a9dff16..d356616558d 100644
--- a/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
@@ -34,7 +34,20 @@ namespace dawn_native {
{{Extension::TextureCompressionBC,
{"texture_compression_bc", "Support Block Compressed (BC) texture formats",
"https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
- &WGPUDeviceProperties::textureCompressionBC}}};
+ &WGPUDeviceProperties::textureCompressionBC},
+ {Extension::ShaderFloat16,
+ {"shader_float16",
+ "Support 16bit float arithmetic and declarations in uniform and storage buffers",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
+ &WGPUDeviceProperties::shaderFloat16},
+ {Extension::PipelineStatisticsQuery,
+ {"pipeline_statistics_query", "Support Pipeline Statistics Query",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+ &WGPUDeviceProperties::pipelineStatisticsQuery},
+ {Extension::TimestampQuery,
+ {"timestamp_query", "Support Timestamp Query",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+ &WGPUDeviceProperties::timestampQuery}}};
} // anonymous namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/Extensions.h b/chromium/third_party/dawn/src/dawn_native/Extensions.h
index 6e6d82d9f4d..ba32ee153e5 100644
--- a/chromium/third_party/dawn/src/dawn_native/Extensions.h
+++ b/chromium/third_party/dawn/src/dawn_native/Extensions.h
@@ -25,6 +25,9 @@ namespace dawn_native {
enum class Extension {
TextureCompressionBC,
+ ShaderFloat16,
+ PipelineStatisticsQuery,
+ TimestampQuery,
EnumCount,
InvalidEnum = EnumCount,
@@ -61,4 +64,4 @@ namespace dawn_native {
} // namespace dawn_native
-#endif // DAWNNATIVE_EXTENSIONS_H_
\ No newline at end of file
+#endif // DAWNNATIVE_EXTENSIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp b/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp
index 1daf10a9980..b8243a256ce 100644
--- a/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp
@@ -31,6 +31,7 @@ namespace dawn_native {
// the fence completed value once the last submitted serial has passed.
mFencesInFlight.Enqueue(FenceInFlight{fence, value},
mDevice->GetLastSubmittedCommandSerial());
+ mDevice->AddFutureCallbackSerial(mDevice->GetPendingCommandSerial());
}
void FenceSignalTracker::Tick(Serial finishedSerial) {
diff --git a/chromium/third_party/dawn/src/dawn_native/Forward.h b/chromium/third_party/dawn/src/dawn_native/Forward.h
index be7c98f9e33..66e07e42ac6 100644
--- a/chromium/third_party/dawn/src/dawn_native/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/Forward.h
@@ -34,6 +34,7 @@ namespace dawn_native {
class InstanceBase;
class PipelineBase;
class PipelineLayoutBase;
+ class QuerySetBase;
class QueueBase;
class RenderBundleBase;
class RenderBundleEncoder;
diff --git a/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.cpp b/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.cpp
new file mode 100644
index 00000000000..8f33e023110
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.cpp
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/MapRequestTracker.h"
+#include "dawn_native/Buffer.h"
+#include "dawn_native/Device.h"
+
+namespace dawn_native {
+ struct Request;
+ class DeviceBase;
+
+ MapRequestTracker::MapRequestTracker(DeviceBase* device) : mDevice(device) {
+ }
+
+ MapRequestTracker::~MapRequestTracker() {
+ ASSERT(mInflightRequests.Empty());
+ }
+
+ void MapRequestTracker::Track(BufferBase* buffer, uint32_t mapSerial, bool isWrite) {
+ Request request;
+ request.buffer = buffer;
+ request.mapSerial = mapSerial;
+ request.isWrite = isWrite;
+
+ mInflightRequests.Enqueue(std::move(request), mDevice->GetPendingCommandSerial());
+ mDevice->AddFutureCallbackSerial(mDevice->GetPendingCommandSerial());
+ }
+
+ void MapRequestTracker::Tick(Serial finishedSerial) {
+ for (auto& request : mInflightRequests.IterateUpTo(finishedSerial)) {
+ request.buffer->OnMapCommandSerialFinished(request.mapSerial, request.isWrite);
+ }
+ mInflightRequests.ClearUpTo(finishedSerial);
+ }
+} // namespace dawn_native
\ No newline at end of file
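
For orientation, the expected life cycle of a map request under the new tracker (a hypothetical call sequence pieced together from this file and the Device.cpp changes above):

    // 1. Buffer::MapReadAsync / MapWriteAsync records the request:
    //        device->GetMapRequestTracker()->Track(buffer, mapSerial, /*isWrite=*/false);
    //    Track() enqueues at the pending command serial and bumps the future
    //    callback serial so Device::Tick keeps running until the request resolves.
    // 2. Device::Tick() advances mCompletedSerial and calls
    //        mMapRequestTracker->Tick(mCompletedSerial);
    // 3. Tick() invokes buffer->OnMapCommandSerialFinished(mapSerial, isWrite) for
    //    every request whose serial has passed, then releases the Ref<BufferBase>
    //    held by the queued Request.
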
diff --git a/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.h b/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.h
new file mode 100644
index 00000000000..0dffca18f3d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.h
@@ -0,0 +1,44 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_MAPREQUESTTRACKER_H_
+#define DAWNNATIVE_MAPREQUESTTRACKER_H_
+
+#include "common/SerialQueue.h"
+#include "dawn_native/Device.h"
+
+namespace dawn_native {
+
+ class MapRequestTracker {
+ public:
+ MapRequestTracker(DeviceBase* device);
+ ~MapRequestTracker();
+
+ void Track(BufferBase* buffer, uint32_t mapSerial, bool isWrite);
+ void Tick(Serial finishedSerial);
+
+ private:
+ DeviceBase* mDevice;
+
+ struct Request {
+ Ref<BufferBase> buffer;
+ uint32_t mapSerial;
+ bool isWrite;
+ };
+ SerialQueue<Request> mInflightRequests;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_MAPREQUESTTRACKER_H_
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
index 470aad5b2f0..9271114bc58 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
@@ -28,11 +28,25 @@ namespace dawn_native {
enum class PassType { Render, Compute };
// Describe the usage of the whole texture and its subresources.
- // subresourceUsages vector is used to track every subresource's usage within a texture.
- // usage variable is used the track the whole texture even though it can be deduced from
- // subresources' usages. This is designed deliberately to track texture usage in a fast path.
+ // - subresourceUsages vector is used to track every subresource's usage within a texture.
+ //
+ // - usage variable is used to track the whole texture even though it can be deduced from
+ // subresources' usages. This is designed deliberately to track texture usage in a fast path
+ // at the frontend.
+ //
+ // - sameUsagesAcrossSubresources is used for optimization at the backend. If the texture view
+ // we are using covers all subresources, then the texture's usages of all subresources are
+ // the same. Otherwise the texture's usages of all subresources are treated as different,
+ // although one can deliberately construct cases where a few texture views share the same
+ // usages and together cover all subresources of the texture.
+
+ // TODO(yunchao.he@intel.com): if sameUsagesAcrossSubresources is true, we don't need the
+ // vector to record every single subresource's usages. The texture usage is enough, and we
+ // can decompress the texture usage into a vector if necessary.
struct PassTextureUsage {
wgpu::TextureUsage usage;
+ bool sameUsagesAcrossSubresources;
std::vector<wgpu::TextureUsage> subresourceUsages;
};
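
The point of sameUsagesAcrossSubresources is a backend fast path: when it is true, one whole-texture transition replaces a per-subresource loop. A sketch of such a consumer (TransitionWholeTexture and TransitionSubresource are hypothetical backend helpers, not Dawn functions):

    void TransitionForPass(TextureBase* texture, const PassTextureUsage& usage) {
        if (usage.sameUsagesAcrossSubresources) {
            // Every subresource shares usage.usage; one barrier suffices.
            TransitionWholeTexture(texture, usage.usage);
            return;
        }
        // Slow path: each subresource may need a different transition.
        for (uint32_t i = 0; i < usage.subresourceUsages.size(); ++i) {
            TransitionSubresource(texture, i, usage.subresourceUsages[i]);
        }
    }
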
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
index 1f9c3500621..f5e4a5664e0 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
@@ -40,19 +40,20 @@ namespace dawn_native {
// TODO (yunchao.he@intel.com): optimize this
PassTextureUsage& textureUsage = mTextureUsages[texture];
- // Set usage for the whole texture
+ // Set parameters for the whole texture
textureUsage.usage |= usage;
+ uint32_t subresourceCount = texture->GetSubresourceCount();
+ textureUsage.sameUsagesAcrossSubresources = levelCount * layerCount == subresourceCount;
// Set usages for subresources
- uint32_t subresourceCount =
- texture->GetSubresourceIndex(texture->GetNumMipLevels(), texture->GetArrayLayers());
if (!textureUsage.subresourceUsages.size()) {
textureUsage.subresourceUsages =
std::vector<wgpu::TextureUsage>(subresourceCount, wgpu::TextureUsage::None);
}
- for (uint32_t mipLevel = baseMipLevel; mipLevel < baseMipLevel + levelCount; ++mipLevel) {
- for (uint32_t arrayLayer = baseArrayLayer; arrayLayer < baseArrayLayer + layerCount;
- ++arrayLayer) {
+ for (uint32_t arrayLayer = baseArrayLayer; arrayLayer < baseArrayLayer + layerCount;
+ ++arrayLayer) {
+ for (uint32_t mipLevel = baseMipLevel; mipLevel < baseMipLevel + levelCount;
+ ++mipLevel) {
uint32_t subresourceIndex = texture->GetSubresourceIndex(mipLevel, arrayLayer);
textureUsage.subresourceUsages[subresourceIndex] |= usage;
}
@@ -63,9 +64,9 @@ namespace dawn_native {
const PassTextureUsage& textureUsage) {
PassTextureUsage& passTextureUsage = mTextureUsages[texture];
passTextureUsage.usage |= textureUsage.usage;
+ passTextureUsage.sameUsagesAcrossSubresources &= textureUsage.sameUsagesAcrossSubresources;
- uint32_t subresourceCount =
- texture->GetSubresourceIndex(texture->GetNumMipLevels(), texture->GetArrayLayers());
+ uint32_t subresourceCount = texture->GetSubresourceCount();
ASSERT(textureUsage.subresourceUsages.size() == subresourceCount);
if (!passTextureUsage.subresourceUsages.size()) {
passTextureUsage.subresourceUsages = textureUsage.subresourceUsages;
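
The loop-order swap (array layer outer, mip level inner) is a locality fix: assuming the usual layout where GetSubresourceIndex computes arrayLayer * mipLevelCount + mipLevel (inferred from the code here, not verified against Texture.cpp), the inner loop now writes consecutive slots of subresourceUsages:

    // With index = arrayLayer * mipLevelCount + mipLevel, iterating layers in the
    // outer loop and mips in the inner loop touches the vector sequentially.
    uint32_t SubresourceIndex(uint32_t mipLevel, uint32_t arrayLayer, uint32_t mipLevelCount) {
        return arrayLayer * mipLevelCount + mipLevel;
    }
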
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
index 6ca811f70fd..df344416a56 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
@@ -33,8 +33,8 @@ namespace dawn_native {
if (descriptor->module->GetExecutionModel() != stage) {
return DAWN_VALIDATION_ERROR("Setting module with wrong stages");
}
- if (layout != nullptr && !descriptor->module->IsCompatibleWithPipelineLayout(layout)) {
- return DAWN_VALIDATION_ERROR("Stage not compatible with layout");
+ if (layout != nullptr) {
+ DAWN_TRY(descriptor->module->ValidateCompatibilityWithPipelineLayout(layout));
}
return {};
}
@@ -43,8 +43,12 @@ namespace dawn_native {
PipelineBase::PipelineBase(DeviceBase* device,
PipelineLayoutBase* layout,
- wgpu::ShaderStage stages)
- : CachedObject(device), mStageMask(stages), mLayout(layout) {
+ wgpu::ShaderStage stages,
+ RequiredBufferSizes minimumBufferSizes)
+ : CachedObject(device),
+ mStageMask(stages),
+ mLayout(layout),
+ mMinimumBufferSizes(std::move(minimumBufferSizes)) {
}
PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -66,6 +70,11 @@ namespace dawn_native {
return mLayout.Get();
}
+ const RequiredBufferSizes& PipelineBase::GetMinimumBufferSizes() const {
+ ASSERT(!IsError());
+ return mMinimumBufferSizes;
+ }
+
MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
@@ -76,11 +85,12 @@ namespace dawn_native {
return {};
}
- BindGroupLayoutBase* PipelineBase::GetBindGroupLayout(uint32_t groupIndex) {
- if (GetDevice()->ConsumedError(ValidateGetBindGroupLayout(groupIndex))) {
+ BindGroupLayoutBase* PipelineBase::GetBindGroupLayout(uint32_t groupIndexIn) {
+ if (GetDevice()->ConsumedError(ValidateGetBindGroupLayout(groupIndexIn))) {
return BindGroupLayoutBase::MakeError(GetDevice());
}
+ BindGroupIndex groupIndex(groupIndexIn);
if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
// Get or create an empty bind group layout.
// TODO(enga): Consider caching this object on the Device and reusing it.
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.h b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
index d248e49d889..bfc846bcde9 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
@@ -39,9 +39,13 @@ namespace dawn_native {
PipelineLayoutBase* GetLayout();
const PipelineLayoutBase* GetLayout() const;
BindGroupLayoutBase* GetBindGroupLayout(uint32_t groupIndex);
+ const RequiredBufferSizes& GetMinimumBufferSizes() const;
protected:
- PipelineBase(DeviceBase* device, PipelineLayoutBase* layout, wgpu::ShaderStage stages);
+ PipelineBase(DeviceBase* device,
+ PipelineLayoutBase* layout,
+ wgpu::ShaderStage stages,
+ RequiredBufferSizes bufferSizes);
PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
private:
@@ -49,6 +53,7 @@ namespace dawn_native {
wgpu::ShaderStage mStageMask;
Ref<PipelineLayoutBase> mLayout;
+ RequiredBufferSizes mMinimumBufferSizes;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
index 0f32ec6181e..def4875c445 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
@@ -25,7 +25,9 @@ namespace dawn_native {
namespace {
- bool operator==(const BindGroupLayoutEntry& lhs, const BindGroupLayoutEntry& rhs) {
+ bool InferredBindGroupLayoutEntriesCompatible(const BindGroupLayoutEntry& lhs,
+ const BindGroupLayoutEntry& rhs) {
+ // Minimum buffer binding size excluded because we take the maximum seen across stages
return lhs.binding == rhs.binding && lhs.visibility == rhs.visibility &&
lhs.type == rhs.type && lhs.hasDynamicOffset == rhs.hasDynamicOffset &&
lhs.multisampled == rhs.multisampled && lhs.viewDimension == rhs.viewDimension &&
@@ -95,8 +97,9 @@ namespace dawn_native {
const PipelineLayoutDescriptor* descriptor)
: CachedObject(device) {
ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
- for (uint32_t group = 0; group < descriptor->bindGroupLayoutCount; ++group) {
- mBindGroupLayouts[group] = descriptor->bindGroupLayouts[group];
+ for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
+ ++group) {
+ mBindGroupLayouts[group] = descriptor->bindGroupLayouts[static_cast<uint32_t>(group)];
mMask.set(group);
}
}
@@ -125,36 +128,42 @@ namespace dawn_native {
ASSERT(count > 0);
// Data which BindGroupLayoutDescriptor will point to for creation
- std::array<std::array<BindGroupLayoutEntry, kMaxBindingsPerGroup>, kMaxBindGroups>
+ ityp::array<BindGroupIndex,
+ ityp::array<BindingIndex, BindGroupLayoutEntry, kMaxBindingsPerGroup>,
+ kMaxBindGroups>
entryData = {};
// A map of bindings to the index in |entryData|
- std::array<std::map<BindingNumber, BindingIndex>, kMaxBindGroups> usedBindingsMap = {};
+ ityp::array<BindGroupIndex, std::map<BindingNumber, BindingIndex>, kMaxBindGroups>
+ usedBindingsMap = {};
// A counter of how many bindings we've populated in |entryData|
- std::array<uint32_t, kMaxBindGroups> entryCounts = {};
+ ityp::array<BindGroupIndex, BindingIndex, kMaxBindGroups> entryCounts = {};
- uint32_t bindGroupLayoutCount = 0;
+ BindGroupIndex bindGroupLayoutCount(0);
for (uint32_t moduleIndex = 0; moduleIndex < count; ++moduleIndex) {
const ShaderModuleBase* module = modules[moduleIndex];
const ShaderModuleBase::ModuleBindingInfo& info = module->GetBindingInfo();
- for (uint32_t group = 0; group < info.size(); ++group) {
+ for (BindGroupIndex group(0); group < info.size(); ++group) {
for (const auto& it : info[group]) {
BindingNumber bindingNumber = it.first;
const ShaderModuleBase::ShaderBindingInfo& bindingInfo = it.second;
- if (bindingInfo.multisampled) {
- return DAWN_VALIDATION_ERROR("Multisampled textures not supported (yet)");
- }
-
BindGroupLayoutEntry bindingSlot;
- bindingSlot.binding = bindingNumber;
+ bindingSlot.binding = static_cast<uint32_t>(bindingNumber);
DAWN_TRY(ValidateBindingTypeWithShaderStageVisibility(
bindingInfo.type, StageBit(module->GetExecutionModel())));
DAWN_TRY(ValidateStorageTextureFormat(device, bindingInfo.type,
bindingInfo.storageTextureFormat));
+ DAWN_TRY(ValidateStorageTextureViewDimension(bindingInfo.type,
+ bindingInfo.viewDimension));
+
+ if (bindingInfo.multisampled) {
+ DAWN_TRY(ValidateBindingCanBeMultisampled(bindingInfo.type,
+ bindingInfo.viewDimension));
+ }
bindingSlot.visibility =
GetShaderStageVisibilityWithBindingType(bindingInfo.type);
@@ -166,38 +175,50 @@ namespace dawn_native {
bindingSlot.textureComponentType =
Format::FormatTypeToTextureComponentType(bindingInfo.textureComponentType);
bindingSlot.storageTextureFormat = bindingInfo.storageTextureFormat;
+ bindingSlot.minBufferBindingSize = bindingInfo.minBufferBindingSize;
{
const auto& it = usedBindingsMap[group].find(bindingNumber);
if (it != usedBindingsMap[group].end()) {
- if (bindingSlot == entryData[group][it->second]) {
- // Already used and the data is the same. Continue.
- continue;
- } else {
+ BindGroupLayoutEntry* existingEntry = &entryData[group][it->second];
+
+ // Check if any properties are incompatible with existing entry
+ // If compatible, we will merge some properties
+ if (!InferredBindGroupLayoutEntriesCompatible(*existingEntry,
+ bindingSlot)) {
return DAWN_VALIDATION_ERROR(
"Duplicate binding in default pipeline layout initialization "
"not compatible with previous declaration");
}
+
+ // Use the max |minBufferBindingSize| we find
+ existingEntry->minBufferBindingSize =
+ std::max(existingEntry->minBufferBindingSize,
+ bindingSlot.minBufferBindingSize);
+
+ // Already used slot, continue
+ continue;
}
}
- uint32_t currentBindingCount = entryCounts[group];
+ BindingIndex currentBindingCount = entryCounts[group];
entryData[group][currentBindingCount] = bindingSlot;
usedBindingsMap[group][bindingNumber] = currentBindingCount;
entryCounts[group]++;
- bindGroupLayoutCount = std::max(bindGroupLayoutCount, group + 1);
+ bindGroupLayoutCount =
+ std::max(bindGroupLayoutCount, group + BindGroupIndex(1));
}
}
}
- std::array<BindGroupLayoutBase*, kMaxBindGroups> bindGroupLayouts = {};
- for (uint32_t group = 0; group < bindGroupLayoutCount; ++group) {
+ ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bindGroupLayouts = {};
+ for (BindGroupIndex group(0); group < bindGroupLayoutCount; ++group) {
BindGroupLayoutDescriptor desc = {};
desc.entries = entryData[group].data();
- desc.entryCount = entryCounts[group];
+ desc.entryCount = static_cast<uint32_t>(entryCounts[group]);
// We should never produce a bad descriptor.
ASSERT(!ValidateBindGroupLayoutDescriptor(device, &desc).IsError());
@@ -206,69 +227,71 @@ namespace dawn_native {
PipelineLayoutDescriptor desc = {};
desc.bindGroupLayouts = bindGroupLayouts.data();
- desc.bindGroupLayoutCount = bindGroupLayoutCount;
+ desc.bindGroupLayoutCount = static_cast<uint32_t>(bindGroupLayoutCount);
PipelineLayoutBase* pipelineLayout = device->CreatePipelineLayout(&desc);
ASSERT(!pipelineLayout->IsError());
// These bind group layouts are created internally and referenced by the pipeline layout.
// Release the external refcount.
- for (uint32_t group = 0; group < bindGroupLayoutCount; ++group) {
+ for (BindGroupIndex group(0); group < bindGroupLayoutCount; ++group) {
if (bindGroupLayouts[group] != nullptr) {
bindGroupLayouts[group]->Release();
}
}
for (uint32_t moduleIndex = 0; moduleIndex < count; ++moduleIndex) {
- ASSERT(modules[moduleIndex]->IsCompatibleWithPipelineLayout(pipelineLayout));
+ ASSERT(modules[moduleIndex]
+ ->ValidateCompatibilityWithPipelineLayout(pipelineLayout)
+ .IsSuccess());
}
return pipelineLayout;
}
- const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(uint32_t group) const {
+ const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
ASSERT(!IsError());
- ASSERT(group < kMaxBindGroups);
+ ASSERT(group < kMaxBindGroupsTyped);
ASSERT(mMask[group]);
const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
ASSERT(bgl != nullptr);
return bgl;
}
- BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(uint32_t group) {
+ BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) {
ASSERT(!IsError());
- ASSERT(group < kMaxBindGroups);
+ ASSERT(group < kMaxBindGroupsTyped);
ASSERT(mMask[group]);
BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
ASSERT(bgl != nullptr);
return bgl;
}
- const std::bitset<kMaxBindGroups> PipelineLayoutBase::GetBindGroupLayoutsMask() const {
+ const BindGroupLayoutMask& PipelineLayoutBase::GetBindGroupLayoutsMask() const {
ASSERT(!IsError());
return mMask;
}
- std::bitset<kMaxBindGroups> PipelineLayoutBase::InheritedGroupsMask(
+ BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(
const PipelineLayoutBase* other) const {
ASSERT(!IsError());
- return {(1 << GroupsInheritUpTo(other)) - 1u};
+ return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
}
- uint32_t PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
+ BindGroupIndex PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
ASSERT(!IsError());
- for (uint32_t i = 0; i < kMaxBindGroups; ++i) {
+ for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
if (!mMask[i] || mBindGroupLayouts[i].Get() != other->mBindGroupLayouts[i].Get()) {
return i;
}
}
- return kMaxBindGroups;
+ return kMaxBindGroupsTyped;
}
size_t PipelineLayoutBase::HashFunc::operator()(const PipelineLayoutBase* pl) const {
size_t hash = Hash(pl->mMask);
- for (uint32_t group : IterateBitSet(pl->mMask)) {
+ for (BindGroupIndex group : IterateBitSet(pl->mMask)) {
HashCombine(&hash, pl->GetBindGroupLayout(group));
}
@@ -281,7 +304,7 @@ namespace dawn_native {
return false;
}
- for (uint32_t group : IterateBitSet(a->mMask)) {
+ for (BindGroupIndex group : IterateBitSet(a->mMask)) {
if (a->GetBindGroupLayout(group) != b->GetBindGroupLayout(group)) {
return false;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
index f919effbb5d..862caaf8c75 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
@@ -16,6 +16,9 @@
#define DAWNNATIVE_PIPELINELAYOUT_H_
#include "common/Constants.h"
+#include "common/ityp_array.h"
+#include "common/ityp_bitset.h"
+#include "dawn_native/BindingInfo.h"
#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
@@ -30,7 +33,9 @@ namespace dawn_native {
MaybeError ValidatePipelineLayoutDescriptor(DeviceBase*,
const PipelineLayoutDescriptor* descriptor);
- using BindGroupLayoutArray = std::array<Ref<BindGroupLayoutBase>, kMaxBindGroups>;
+ using BindGroupLayoutArray =
+ ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
+ using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
class PipelineLayoutBase : public CachedObject {
public:
@@ -41,17 +46,17 @@ namespace dawn_native {
static ResultOrError<PipelineLayoutBase*>
CreateDefault(DeviceBase* device, const ShaderModuleBase* const* modules, uint32_t count);
- const BindGroupLayoutBase* GetBindGroupLayout(uint32_t group) const;
- BindGroupLayoutBase* GetBindGroupLayout(uint32_t group);
- const std::bitset<kMaxBindGroups> GetBindGroupLayoutsMask() const;
+ const BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group) const;
+ BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group);
+ const BindGroupLayoutMask& GetBindGroupLayoutsMask() const;
// Utility functions to compute inherited bind groups.
// Returns the inherited bind groups as a mask.
- std::bitset<kMaxBindGroups> InheritedGroupsMask(const PipelineLayoutBase* other) const;
+ BindGroupLayoutMask InheritedGroupsMask(const PipelineLayoutBase* other) const;
// Returns the index of the first incompatible bind group in the range
- // [1, kMaxBindGroups + 1]
- uint32_t GroupsInheritUpTo(const PipelineLayoutBase* other) const;
+ // [0, kMaxBindGroups]
+ BindGroupIndex GroupsInheritUpTo(const PipelineLayoutBase* other) const;
// Functors necessary for the unordered_set<PipelineLayoutBase*>-based cache.
struct HashFunc {
@@ -65,7 +70,7 @@ namespace dawn_native {
PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
BindGroupLayoutArray mBindGroupLayouts;
- std::bitset<kMaxBindGroups> mMask;
+ BindGroupLayoutMask mMask;
};
} // namespace dawn_native
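
BindGroupIndex, BindingIndex, and the ityp:: containers exist so that distinct index spaces cannot be mixed: indexing a per-group array with a per-binding index becomes a compile error rather than a silent bug. The essence of the trick, reduced to a few lines (a sketch; the real TypedInteger in common/TypedInteger.h adds overflow checks and a fuller operator set):

    #include <cstdint>

    // Distinct Tag types make the two aliases mutually unconvertible even though
    // both wrap a uint32_t.
    template <typename Tag, typename T = uint32_t>
    class TypedIntegerSketch {
      public:
        constexpr explicit TypedIntegerSketch(T value) : mValue(value) {}
        constexpr explicit operator T() const { return mValue; }
        constexpr bool operator<(TypedIntegerSketch rhs) const { return mValue < rhs.mValue; }
        TypedIntegerSketch& operator++() { ++mValue; return *this; }

      private:
        T mValue;
    };

    using BindGroupIndex = TypedIntegerSketch<struct BindGroupIndexTag>;
    using BindingIndex = TypedIntegerSketch<struct BindingIndexTag>;
    // BindGroupIndex g{0}; BindingIndex b = g;  // error: no conversion between tags
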
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
index df967522024..83aedad39dd 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/ProgrammablePassEncoder.h"
#include "common/BitSetIterator.h"
+#include "common/ityp_array.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h"
@@ -29,8 +30,8 @@ namespace dawn_native {
namespace {
void TrackBindGroupResourceUsage(PassResourceUsageTracker* usageTracker,
BindGroupBase* group) {
- for (BindingIndex bindingIndex = 0;
- bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
+ for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+ ++bindingIndex) {
wgpu::BindingType type = group->GetLayout()->GetBindingInfo(bindingIndex).type;
switch (type) {
@@ -129,25 +130,30 @@ namespace dawn_native {
});
}
- void ProgrammablePassEncoder::SetBindGroup(uint32_t groupIndex,
+ void ProgrammablePassEncoder::SetBindGroup(uint32_t groupIndexIn,
BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) {
+ uint32_t dynamicOffsetCountIn,
+ const uint32_t* dynamicOffsetsIn) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ BindGroupIndex groupIndex(groupIndexIn);
+
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(group));
- if (groupIndex >= kMaxBindGroups) {
+ if (groupIndex >= kMaxBindGroupsTyped) {
return DAWN_VALIDATION_ERROR("Setting bind group over the max");
}
+ ityp::span<BindingIndex, const uint32_t> dynamicOffsets(
+ dynamicOffsetsIn, BindingIndex(dynamicOffsetCountIn));
+
// Dynamic offsets count must match the number required by the layout perfectly.
const BindGroupLayoutBase* layout = group->GetLayout();
- if (layout->GetDynamicBufferCount() != dynamicOffsetCount) {
+ if (layout->GetDynamicBufferCount() != dynamicOffsets.size()) {
return DAWN_VALIDATION_ERROR("dynamicOffset count mismatch");
}
- for (BindingIndex i = 0; i < dynamicOffsetCount; ++i) {
+ for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
// BGL creation sorts bindings such that the dynamic buffer bindings are first.
@@ -185,10 +191,10 @@ namespace dawn_native {
SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
cmd->index = groupIndex;
cmd->group = group;
- cmd->dynamicOffsetCount = dynamicOffsetCount;
- if (dynamicOffsetCount > 0) {
+ cmd->dynamicOffsetCount = dynamicOffsetCountIn;
+ if (dynamicOffsetCountIn > 0) {
uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
- memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
+ memcpy(offsets, dynamicOffsetsIn, dynamicOffsetCountIn * sizeof(uint32_t));
}
TrackBindGroupResourceUsage(&mUsageTracker, group);
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp b/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
new file mode 100644
index 00000000000..513658db4c7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
@@ -0,0 +1,153 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/QuerySet.h"
+
+#include "dawn_native/Device.h"
+#include "dawn_native/Extensions.h"
+#include "dawn_native/ValidationUtils_autogen.h"
+
+#include <set>
+
+namespace dawn_native {
+
+ namespace {
+
+ class ErrorQuerySet final : public QuerySetBase {
+ public:
+ ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {
+ }
+
+ private:
+ void DestroyImpl() override {
+ UNREACHABLE();
+ }
+ };
+
+ } // anonymous namespace
+
+ MaybeError ValidateQuerySetDescriptor(DeviceBase* device,
+ const QuerySetDescriptor* descriptor) {
+ if (descriptor->nextInChain != nullptr) {
+ return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+ }
+
+ DAWN_TRY(ValidateQueryType(descriptor->type));
+
+ switch (descriptor->type) {
+ case wgpu::QueryType::Occlusion:
+ if (descriptor->pipelineStatisticsCount != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "The pipeline statistics should not be set if query type is Occlusion");
+ }
+ break;
+
+ case wgpu::QueryType::PipelineStatistics: {
+ if (!device->IsExtensionEnabled(Extension::PipelineStatisticsQuery)) {
+ return DAWN_VALIDATION_ERROR(
+ "The pipeline statistics query feature is not supported");
+ }
+
+ if (descriptor->pipelineStatisticsCount == 0) {
+ return DAWN_VALIDATION_ERROR(
+ "At least one pipeline statistic must be set if query type is "
+ "PipelineStatistics");
+ }
+
+ std::set<wgpu::PipelineStatisticsName> pipelineStatisticsSet;
+ for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+ DAWN_TRY(ValidatePipelineStatisticsName(descriptor->pipelineStatistics[i]));
+
+ std::pair<std::set<wgpu::PipelineStatisticsName>::iterator, bool> res =
+ pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
+ if (!res.second) {
+ return DAWN_VALIDATION_ERROR("Duplicate pipeline statistics found");
+ }
+ }
+ } break;
+
+ case wgpu::QueryType::Timestamp:
+ if (!device->IsExtensionEnabled(Extension::TimestampQuery)) {
+ return DAWN_VALIDATION_ERROR("The timestamp query feature is not supported");
+ }
+
+ if (descriptor->pipelineStatisticsCount != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "The pipeline statistics should not be set if query type is Timestamp");
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return {};
+ }
+
+ QuerySetBase::QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor)
+ : ObjectBase(device),
+ mQueryType(descriptor->type),
+ mQueryCount(descriptor->count),
+ mState(QuerySetState::Available) {
+ for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+ mPipelineStatistics.push_back(descriptor->pipelineStatistics[i]);
+ }
+ }
+
+ QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ObjectBase(device, tag) {
+ }
+
+ QuerySetBase::~QuerySetBase() {
+ // Uninitialized or already destroyed
+ ASSERT(mState == QuerySetState::Unavailable || mState == QuerySetState::Destroyed);
+ }
+
+ // static
+ QuerySetBase* QuerySetBase::MakeError(DeviceBase* device) {
+ return new ErrorQuerySet(device);
+ }
+
+ wgpu::QueryType QuerySetBase::GetQueryType() const {
+ return mQueryType;
+ }
+
+ uint32_t QuerySetBase::GetQueryCount() const {
+ return mQueryCount;
+ }
+
+ const std::vector<wgpu::PipelineStatisticsName>& QuerySetBase::GetPipelineStatistics() const {
+ return mPipelineStatistics;
+ }
+
+ void QuerySetBase::Destroy() {
+ if (GetDevice()->ConsumedError(ValidateDestroy())) {
+ return;
+ }
+ DestroyInternal();
+ }
+
+ MaybeError QuerySetBase::ValidateDestroy() const {
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ return {};
+ }
+
+ void QuerySetBase::DestroyInternal() {
+ if (mState != QuerySetState::Destroyed) {
+ DestroyImpl();
+ }
+ mState = QuerySetState::Destroyed;
+ }
+
+} // namespace dawn_native
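
Usage from the device side, matching the validation rules above (a sketch; VertexShaderInvocations is an assumed enumerator, since the diff only shows the wgpu::PipelineStatisticsName type):

    // Pipeline-statistics query sets need Extension::PipelineStatisticsQuery and at
    // least one statistic; Occlusion/Timestamp sets must leave the count at zero.
    wgpu::PipelineStatisticsName stats[] = {
        wgpu::PipelineStatisticsName::VertexShaderInvocations};  // assumed name

    QuerySetDescriptor desc = {};
    desc.type = wgpu::QueryType::PipelineStatistics;
    desc.count = 32;  // number of queries in the set
    desc.pipelineStatistics = stats;
    desc.pipelineStatisticsCount = 1;

    QuerySetBase* querySet = device->CreateQuerySet(&desc);
    // On validation failure this returns QuerySetBase::MakeError(device) instead
    // of a null pointer, matching Dawn's error-object convention.
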
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.h b/chromium/third_party/dawn/src/dawn_native/QuerySet.h
new file mode 100644
index 00000000000..7883678ffb0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/QuerySet.h
@@ -0,0 +1,61 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_QUERYSET_H_
+#define DAWNNATIVE_QUERYSET_H_
+
+#include "dawn_native/Error.h"
+#include "dawn_native/Forward.h"
+#include "dawn_native/ObjectBase.h"
+
+#include "dawn_native/dawn_platform.h"
+
+namespace dawn_native {
+
+ MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor);
+
+ class QuerySetBase : public ObjectBase {
+ public:
+ QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor);
+
+ static QuerySetBase* MakeError(DeviceBase* device);
+
+ wgpu::QueryType GetQueryType() const;
+ uint32_t GetQueryCount() const;
+ const std::vector<wgpu::PipelineStatisticsName>& GetPipelineStatistics() const;
+
+ void Destroy();
+
+ protected:
+ QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ ~QuerySetBase() override;
+
+ void DestroyInternal();
+
+ private:
+ virtual void DestroyImpl() = 0;
+
+ MaybeError ValidateDestroy() const;
+
+ wgpu::QueryType mQueryType;
+ uint32_t mQueryCount;
+ std::vector<wgpu::PipelineStatisticsName> mPipelineStatistics;
+
+ enum class QuerySetState { Unavailable, Available, Destroyed };
+ QuerySetState mState = QuerySetState::Unavailable;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_QUERYSET_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.cpp b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
index 3af330af105..3dcf2b0e469 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
@@ -17,6 +17,7 @@
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/Device.h"
+#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorScope.h"
#include "dawn_native/ErrorScopeTracker.h"
#include "dawn_native/Fence.h"
@@ -25,6 +26,8 @@
#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
+#include <cstring>
+
namespace dawn_native {
// QueueBase
@@ -91,8 +94,44 @@ namespace dawn_native {
return new Fence(this, descriptor);
}
+ void QueueBase::WriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ GetDevice()->ConsumedError(WriteBufferInternal(buffer, bufferOffset, data, size));
+ }
+
+ MaybeError QueueBase::WriteBufferInternal(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ DAWN_TRY(ValidateWriteBuffer(buffer, bufferOffset, size));
+ return WriteBufferImpl(buffer, bufferOffset, data, size);
+ }
+
+ MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ if (size == 0) {
+ return {};
+ }
+
+ DeviceBase* device = GetDevice();
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ size, device->GetPendingCommandSerial()));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+ memcpy(uploadHandle.mappedBuffer, data, size);
+
+ return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
+ buffer, bufferOffset, size);
+ }
+
MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
- CommandBufferBase* const* commands) {
+ CommandBufferBase* const* commands) const {
TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
DAWN_TRY(GetDevice()->ValidateObject(this));
@@ -103,7 +142,7 @@ namespace dawn_native {
for (const PassResourceUsage& passUsages : usages.perPass) {
for (const BufferBase* buffer : passUsages.buffers) {
- DAWN_TRY(buffer->ValidateCanUseInSubmitNow());
+ DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
}
for (const TextureBase* texture : passUsages.textures) {
DAWN_TRY(texture->ValidateCanUseInSubmitNow());
@@ -111,7 +150,7 @@ namespace dawn_native {
}
for (const BufferBase* buffer : usages.topLevelBuffers) {
- DAWN_TRY(buffer->ValidateCanUseInSubmitNow());
+ DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
}
for (const TextureBase* texture : usages.topLevelTextures) {
DAWN_TRY(texture->ValidateCanUseInSubmitNow());
@@ -121,7 +160,7 @@ namespace dawn_native {
return {};
}
- MaybeError QueueBase::ValidateSignal(const Fence* fence, uint64_t signalValue) {
+ MaybeError QueueBase::ValidateSignal(const Fence* fence, uint64_t signalValue) const {
DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(GetDevice()->ValidateObject(fence));
@@ -136,7 +175,7 @@ namespace dawn_native {
return {};
}
- MaybeError QueueBase::ValidateCreateFence(const FenceDescriptor* descriptor) {
+ MaybeError QueueBase::ValidateCreateFence(const FenceDescriptor* descriptor) const {
DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
if (descriptor != nullptr) {
@@ -146,4 +185,30 @@ namespace dawn_native {
return {};
}
+ MaybeError QueueBase::ValidateWriteBuffer(const BufferBase* buffer,
+ uint64_t bufferOffset,
+ size_t size) const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
+
+ if (bufferOffset % 4 != 0) {
+ return DAWN_VALIDATION_ERROR("Queue::WriteBuffer bufferOffset must be a multiple of 4");
+ }
+ if (size % 4 != 0) {
+ return DAWN_VALIDATION_ERROR("Queue::WriteBuffer size must be a multiple of 4");
+ }
+
+ uint64_t bufferSize = buffer->GetSize();
+ if (bufferOffset > bufferSize || size > (bufferSize - bufferOffset)) {
+ return DAWN_VALIDATION_ERROR("Queue::WriteBuffer out of range");
+ }
+
+ if (!(buffer->GetUsage() & wgpu::BufferUsage::CopyDst)) {
+ return DAWN_VALIDATION_ERROR("Buffer needs the CopyDst usage bit");
+ }
+
+ return buffer->ValidateCanUseOnQueueNow();
+ }
+
} // namespace dawn_native
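
ValidateWriteBuffer pins down the contract for the new Queue::WriteBuffer: the destination needs CopyDst usage, and both bufferOffset and size must be multiples of 4 and within the buffer. A usage sketch under those rules:

    #include <cstdint>
    #include <vector>

    void UploadVertices(QueueBase* queue, BufferBase* vertexBuffer) {
        std::vector<float> vertices = {0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f};
        size_t byteSize = vertices.size() * sizeof(float);  // 24 bytes, a multiple of 4

        // vertexBuffer is assumed to be created with wgpu::BufferUsage::CopyDst and to
        // be at least byteSize bytes; offset 0 trivially satisfies the alignment rule.
        queue->WriteBuffer(vertexBuffer, /*bufferOffset=*/0, vertices.data(), byteSize);
    }
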
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.h b/chromium/third_party/dawn/src/dawn_native/Queue.h
index fd9d291f00c..5fd722ddf5e 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.h
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.h
@@ -33,15 +33,28 @@ namespace dawn_native {
void Submit(uint32_t commandCount, CommandBufferBase* const* commands);
void Signal(Fence* fence, uint64_t signalValue);
Fence* CreateFence(const FenceDescriptor* descriptor);
+ void WriteBuffer(BufferBase* buffer, uint64_t bufferOffset, const void* data, size_t size);
private:
QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- virtual MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands);
+ MaybeError WriteBufferInternal(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size);
- MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands);
- MaybeError ValidateSignal(const Fence* fence, uint64_t signalValue);
- MaybeError ValidateCreateFence(const FenceDescriptor* descriptor);
+ virtual MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands);
+ virtual MaybeError WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size);
+
+ MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
+ MaybeError ValidateSignal(const Fence* fence, uint64_t signalValue) const;
+ MaybeError ValidateCreateFence(const FenceDescriptor* descriptor) const;
+ MaybeError ValidateWriteBuffer(const BufferBase* buffer,
+ uint64_t bufferOffset,
+ size_t size) const;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
index dc4a508c29b..ceeb1eb2fc9 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
@@ -183,6 +183,29 @@ namespace dawn_native {
return {};
}
+ RequiredBufferSizes ComputeMinBufferSizes(const RenderPipelineDescriptor* descriptor) {
+ RequiredBufferSizes bufferSizes =
+ descriptor->vertexStage.module->ComputeRequiredBufferSizesForLayout(
+ descriptor->layout);
+
+ // Merge the two buffer size requirements by taking the larger element from each
+ if (descriptor->fragmentStage != nullptr) {
+ RequiredBufferSizes fragmentSizes =
+ descriptor->fragmentStage->module->ComputeRequiredBufferSizesForLayout(
+ descriptor->layout);
+
+ for (BindGroupIndex group(0); group < bufferSizes.size(); ++group) {
+ ASSERT(bufferSizes[group].size() == fragmentSizes[group].size());
+ for (size_t i = 0; i < bufferSizes[group].size(); ++i) {
+ bufferSizes[group][i] =
+ std::max(bufferSizes[group][i], fragmentSizes[group][i]);
+ }
+ }
+ }
+
+ return bufferSizes;
+ }
+
} // anonymous namespace
// Helper functions
@@ -380,7 +403,8 @@ namespace dawn_native {
const RenderPipelineDescriptor* descriptor)
: PipelineBase(device,
descriptor->layout,
- wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment),
+ wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
+ ComputeMinBufferSizes(descriptor)),
mAttachmentState(device->GetOrCreateAttachmentState(descriptor)),
mPrimitiveTopology(descriptor->primitiveTopology),
mSampleMask(descriptor->sampleMask),
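
ComputeMinBufferSizes above merges the vertex and fragment stage requirements by
taking, slot by slot, the larger of the two minimum sizes: a buffer bound at a
given slot has to satisfy both stages at once. The merge step in isolation,
assuming both vectors describe the same bind group and are therefore the same
length:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    std::vector<uint64_t> MergeMinSizes(const std::vector<uint64_t>& vertex,
                                        const std::vector<uint64_t>& fragment) {
        assert(vertex.size() == fragment.size());
        std::vector<uint64_t> merged(vertex.size());
        for (size_t i = 0; i < merged.size(); ++i) {
            // A buffer visible to both stages must be at least as large as
            // the bigger of the two per-stage requirements.
            merged[i] = std::max(vertex[i], fragment[i]);
        }
        return merged;
    }
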
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
index 68da595728d..04b6dfc2079 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
@@ -23,6 +23,14 @@
#include <spirv-tools/libspirv.hpp>
#include <spirv_cross.hpp>
+#ifdef DAWN_ENABLE_WGSL
+// The Tint include must come after spirv_cross.hpp because spirv-cross ships its
+// own copy of spirv_headers.
+// clang-format off
+#include <tint/tint.h>
+// clang-format on
+#endif // DAWN_ENABLE_WGSL
+
#include <sstream>
namespace dawn_native {
@@ -280,6 +288,13 @@ namespace dawn_native {
return wgpu::TextureFormat::Undefined;
}
}
+
+ std::string GetShaderDeclarationString(BindGroupIndex group, BindingNumber binding) {
+ std::ostringstream ostream;
+ ostream << "the shader module declaration at set " << static_cast<uint32_t>(group)
+ << " binding " << static_cast<uint32_t>(binding);
+ return ostream.str();
+ }
} // anonymous namespace
MaybeError ValidateSpirv(DeviceBase*, const uint32_t* code, uint32_t codeSize) {
@@ -316,6 +331,77 @@ namespace dawn_native {
return {};
}
+#ifdef DAWN_ENABLE_WGSL
+ MaybeError ValidateWGSL(const char* source) {
+ std::ostringstream errorStream;
+ errorStream << "Tint WGSL failure:" << std::endl;
+
+ tint::Context context;
+ tint::reader::wgsl::Parser parser(&context, source);
+
+ if (!parser.Parse()) {
+ errorStream << "Parser: " << parser.error() << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::ast::Module module = parser.module();
+ if (!module.IsValid()) {
+ errorStream << "Invalid module generated..." << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::TypeDeterminer type_determiner(&context, &module);
+ if (!type_determiner.Determine()) {
+ errorStream << "Type Determination: " << type_determiner.error();
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::Validator validator;
+ if (!validator.Validate(module)) {
+ errorStream << "Validation: " << validator.error() << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ return {};
+ }
+
+ ResultOrError<std::vector<uint32_t>> ConvertWGSLToSPIRV(const char* source) {
+ std::ostringstream errorStream;
+ errorStream << "Tint WGSL->SPIR-V failure:" << std::endl;
+
+ tint::Context context;
+ tint::reader::wgsl::Parser parser(&context, source);
+
+        // TODO: This duplicates the parse done in ValidateWGSL; the parsed state
+        // should be stored between calls so the source is only parsed once.
+ if (!parser.Parse()) {
+ errorStream << "Parser: " << parser.error() << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::ast::Module module = parser.module();
+ if (!module.IsValid()) {
+ errorStream << "Invalid module generated..." << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::TypeDeterminer type_determiner(&context, &module);
+ if (!type_determiner.Determine()) {
+ errorStream << "Type Determination: " << type_determiner.error();
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::writer::spirv::Generator generator(std::move(module));
+ if (!generator.Generate()) {
+ errorStream << "Generator: " << generator.error() << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ std::vector<uint32_t> spirv = generator.result();
+ return std::move(spirv);
+ }
+#endif // DAWN_ENABLE_WGSL
+
MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
const ShaderModuleDescriptor* descriptor) {
const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
@@ -330,17 +416,22 @@ namespace dawn_native {
switch (chainedDescriptor->sType) {
case wgpu::SType::ShaderModuleSPIRVDescriptor: {
- const ShaderModuleSPIRVDescriptor* spirvDesc =
+ const auto* spirvDesc =
static_cast<const ShaderModuleSPIRVDescriptor*>(chainedDescriptor);
DAWN_TRY(ValidateSpirv(device, spirvDesc->code, spirvDesc->codeSize));
break;
}
case wgpu::SType::ShaderModuleWGSLDescriptor: {
- return DAWN_VALIDATION_ERROR("WGSL not supported (yet)");
+#ifdef DAWN_ENABLE_WGSL
+ const auto* wgslDesc =
+ static_cast<const ShaderModuleWGSLDescriptor*>(chainedDescriptor);
+ DAWN_TRY(ValidateWGSL(wgslDesc->source));
break;
+#else
+ return DAWN_VALIDATION_ERROR("WGSL not supported (yet)");
+#endif // DAWN_ENABLE_WGSL
}
-
default:
return DAWN_VALIDATION_ERROR("Unsupported sType");
}
@@ -351,13 +442,26 @@ namespace dawn_native {
// ShaderModuleBase
ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
- : CachedObject(device) {
+ : CachedObject(device), mType(Type::Undefined) {
ASSERT(descriptor->nextInChain != nullptr);
- ASSERT(descriptor->nextInChain->sType == wgpu::SType::ShaderModuleSPIRVDescriptor);
-
- const ShaderModuleSPIRVDescriptor* spirvDesc =
- static_cast<const ShaderModuleSPIRVDescriptor*>(descriptor->nextInChain);
- mSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+ switch (descriptor->nextInChain->sType) {
+ case wgpu::SType::ShaderModuleSPIRVDescriptor: {
+ mType = Type::Spirv;
+ const auto* spirvDesc =
+ static_cast<const ShaderModuleSPIRVDescriptor*>(descriptor->nextInChain);
+ mSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+ break;
+ }
+ case wgpu::SType::ShaderModuleWGSLDescriptor: {
+ mType = Type::Wgsl;
+ const auto* wgslDesc =
+ static_cast<const ShaderModuleWGSLDescriptor*>(descriptor->nextInChain);
+ mWgsl = std::string(wgslDesc->source);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
mFragmentOutputFormatBaseTypes.fill(Format::Other);
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvcParser)) {
@@ -366,7 +470,7 @@ namespace dawn_native {
}
ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : CachedObject(device, tag) {
+ : CachedObject(device, tag), mType(Type::Undefined) {
}
ShaderModuleBase::~ShaderModuleBase() {
@@ -411,12 +515,14 @@ namespace dawn_native {
auto ExtractResourcesBinding =
[this](std::vector<shaderc_spvc_binding_info> bindings) -> MaybeError {
for (const auto& binding : bindings) {
- if (binding.set >= kMaxBindGroups) {
+ BindGroupIndex bindGroupIndex(binding.set);
+
+ if (bindGroupIndex >= kMaxBindGroupsTyped) {
return DAWN_VALIDATION_ERROR("Bind group index over limits in the SPIRV");
}
- const auto& it = mBindingInfo[binding.set].emplace(BindingNumber(binding.binding),
- ShaderBindingInfo{});
+ const auto& it = mBindingInfo[bindGroupIndex].emplace(
+ BindingNumber(binding.binding), ShaderBindingInfo{});
if (!it.second) {
return DAWN_VALIDATION_ERROR("Shader has duplicate bindings");
}
@@ -454,6 +560,11 @@ namespace dawn_native {
info->viewDimension = ToWGPUTextureViewDimension(binding.texture_dimension);
break;
}
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ info->minBufferBindingSize = binding.minimum_buffer_size;
+ break;
default:
break;
}
@@ -572,6 +683,10 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Push constants aren't supported.");
}
+ if (resources.sampled_images.size() > 0) {
+ return DAWN_VALIDATION_ERROR("Combined images and samplers aren't supported.");
+ }
+
// Fill in bindingInfo with the SPIRV bindings
auto ExtractResourcesBinding =
[this](const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
@@ -589,13 +704,15 @@ namespace dawn_native {
BindingNumber bindingNumber(
compiler.get_decoration(resource.id, spv::DecorationBinding));
- uint32_t set = compiler.get_decoration(resource.id, spv::DecorationDescriptorSet);
+ BindGroupIndex bindGroupIndex(
+ compiler.get_decoration(resource.id, spv::DecorationDescriptorSet));
- if (set >= kMaxBindGroups) {
+ if (bindGroupIndex >= kMaxBindGroupsTyped) {
return DAWN_VALIDATION_ERROR("Bind group index over limits in the SPIRV");
}
- const auto& it = mBindingInfo[set].emplace(bindingNumber, ShaderBindingInfo{});
+ const auto& it =
+ mBindingInfo[bindGroupIndex].emplace(bindingNumber, ShaderBindingInfo{});
if (!it.second) {
return DAWN_VALIDATION_ERROR("Shader has duplicate bindings");
}
@@ -604,6 +721,15 @@ namespace dawn_native {
info->id = resource.id;
info->base_type_id = resource.base_type_id;
+ if (bindingType == wgpu::BindingType::UniformBuffer ||
+ bindingType == wgpu::BindingType::StorageBuffer ||
+ bindingType == wgpu::BindingType::ReadonlyStorageBuffer) {
+ // Determine buffer size, with a minimum of 1 element in the runtime array
+ spirv_cross::SPIRType type = compiler.get_type(info->base_type_id);
+ info->minBufferBindingSize =
+ compiler.get_declared_struct_size_runtime_array(type, 1);
+ }
+
switch (bindingType) {
case wgpu::BindingType::SampledTexture: {
spirv_cross::SPIRType::ImageType imageType =
@@ -759,26 +885,70 @@ namespace dawn_native {
return mExecutionModel;
}
- bool ShaderModuleBase::IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout) const {
- ASSERT(!IsError());
+ RequiredBufferSizes ShaderModuleBase::ComputeRequiredBufferSizesForLayout(
+ const PipelineLayoutBase* layout) const {
+ RequiredBufferSizes bufferSizes;
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ bufferSizes[group] =
+ GetBindGroupMinBufferSizes(mBindingInfo[group], layout->GetBindGroupLayout(group));
+ }
- for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- if (!IsCompatibleWithBindGroupLayout(group, layout->GetBindGroupLayout(group))) {
- return false;
+ return bufferSizes;
+ }
+
+ std::vector<uint64_t> ShaderModuleBase::GetBindGroupMinBufferSizes(
+ const BindingInfoMap& shaderMap,
+ const BindGroupLayoutBase* layout) const {
+ std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
+ uint32_t packedIdx = 0;
+
+ for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBufferCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
+ if (bindingInfo.minBufferBindingSize != 0) {
+                // Skip bindings that have a minimum buffer size set in the layout
+ continue;
}
+
+ ASSERT(packedIdx < requiredBufferSizes.size());
+ const auto& shaderInfo = shaderMap.find(bindingInfo.binding);
+ if (shaderInfo != shaderMap.end()) {
+ requiredBufferSizes[packedIdx] = shaderInfo->second.minBufferBindingSize;
+ } else {
+                // We still have to reserve a slot for buffers that are included in the
+                // bind group's packed vector even when the shader does not use them. We
+                // don't actually need to check these at draw time, so if this is a
+                // problem in the future we can optimize it further.
+ requiredBufferSizes[packedIdx] = 0;
+ }
+ ++packedIdx;
+ }
+
+ return requiredBufferSizes;
+ }
+
+ MaybeError ShaderModuleBase::ValidateCompatibilityWithPipelineLayout(
+ const PipelineLayoutBase* layout) const {
+ ASSERT(!IsError());
+
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ DAWN_TRY(
+ ValidateCompatibilityWithBindGroupLayout(group, layout->GetBindGroupLayout(group)));
}
- for (uint32_t group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
+ for (BindGroupIndex group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
if (mBindingInfo[group].size() > 0) {
- return false;
+ std::ostringstream ostream;
+                ostream << "No bind group layout entry matches the declarations at set "
+ << static_cast<uint32_t>(group) << " in the shader module";
+ return DAWN_VALIDATION_ERROR(ostream.str());
}
}
- return true;
+ return {};
}
- bool ShaderModuleBase::IsCompatibleWithBindGroupLayout(
- size_t group,
+ MaybeError ShaderModuleBase::ValidateCompatibilityWithBindGroupLayout(
+ BindGroupIndex group,
const BindGroupLayoutBase* layout) const {
ASSERT(!IsError());
@@ -792,7 +962,8 @@ namespace dawn_native {
const auto& bindingIt = bindingMap.find(bindingNumber);
if (bindingIt == bindingMap.end()) {
- return false;
+ return DAWN_VALIDATION_ERROR("Missing bind group layout entry for " +
+ GetShaderDeclarationString(group, bindingNumber));
}
BindingIndex bindingIndex(bindingIt->second);
@@ -817,22 +988,32 @@ namespace dawn_native {
moduleInfo.type == wgpu::BindingType::Sampler);
if (!validBindingConversion) {
- return false;
+ return DAWN_VALIDATION_ERROR(
+                        "The binding type of the bind group layout entry conflicts with " +
+ GetShaderDeclarationString(group, bindingNumber));
}
}
if ((bindingInfo.visibility & StageBit(mExecutionModel)) == 0) {
- return false;
+ return DAWN_VALIDATION_ERROR("The bind group layout entry for " +
+ GetShaderDeclarationString(group, bindingNumber) +
+ " is not visible for the shader stage");
}
switch (bindingInfo.type) {
case wgpu::BindingType::SampledTexture: {
if (bindingInfo.textureComponentType != moduleInfo.textureComponentType) {
- return false;
+ return DAWN_VALIDATION_ERROR(
+ "The textureComponentType of the bind group layout entry is different "
+ "from " +
+ GetShaderDeclarationString(group, bindingNumber));
}
if (bindingInfo.viewDimension != moduleInfo.viewDimension) {
- return false;
+ return DAWN_VALIDATION_ERROR(
+ "The viewDimension of the bind group layout entry is different "
+ "from " +
+ GetShaderDeclarationString(group, bindingNumber));
}
break;
}
@@ -842,17 +1023,32 @@ namespace dawn_native {
ASSERT(bindingInfo.storageTextureFormat != wgpu::TextureFormat::Undefined);
ASSERT(moduleInfo.storageTextureFormat != wgpu::TextureFormat::Undefined);
if (bindingInfo.storageTextureFormat != moduleInfo.storageTextureFormat) {
- return false;
+ return DAWN_VALIDATION_ERROR(
+ "The storageTextureFormat of the bind group layout entry is different "
+ "from " +
+ GetShaderDeclarationString(group, bindingNumber));
}
if (bindingInfo.viewDimension != moduleInfo.viewDimension) {
- return false;
+ return DAWN_VALIDATION_ERROR(
+ "The viewDimension of the bind group layout entry is different "
+ "from " +
+ GetShaderDeclarationString(group, bindingNumber));
}
break;
}
case wgpu::BindingType::UniformBuffer:
case wgpu::BindingType::ReadonlyStorageBuffer:
- case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::StorageBuffer: {
+ if (bindingInfo.minBufferBindingSize != 0 &&
+ moduleInfo.minBufferBindingSize > bindingInfo.minBufferBindingSize) {
+ return DAWN_VALIDATION_ERROR(
+ "The minimum buffer size of the bind group layout entry is smaller "
+ "than " +
+ GetShaderDeclarationString(group, bindingNumber));
+ }
+ break;
+ }
case wgpu::BindingType::Sampler:
case wgpu::BindingType::ComparisonSampler:
break;
@@ -860,11 +1056,11 @@ namespace dawn_native {
case wgpu::BindingType::StorageTexture:
default:
UNREACHABLE();
- return false;
+ return DAWN_VALIDATION_ERROR("Unsupported binding type");
}
}
- return true;
+ return {};
}
size_t ShaderModuleBase::HashFunc::operator()(const ShaderModuleBase* module) const {
@@ -904,4 +1100,15 @@ namespace dawn_native {
return options;
}
+ MaybeError ShaderModuleBase::InitializeBase() {
+ if (mType == Type::Wgsl) {
+#ifdef DAWN_ENABLE_WGSL
+ DAWN_TRY_ASSIGN(mSpirv, ConvertWGSLToSPIRV(mWgsl.c_str()));
+#else
+ return DAWN_VALIDATION_ERROR("WGSL not supported (yet)");
+#endif // DAWN_ENABLE_WGSL
+ }
+
+ return {};
+ }
} // namespace dawn_native
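
GetBindGroupMinBufferSizes above only records sizes for bindings whose layout
left minBufferBindingSize at zero; those unverified entries are packed into a
dense vector so the draw-time check walks as few slots as possible. A
freestanding sketch of that packing, using simplified stand-in types rather
than Dawn's BindingInfo:

    #include <cstdint>
    #include <map>
    #include <vector>

    struct LayoutEntry {
        uint32_t bindingNumber;
        uint64_t minBufferBindingSize;  // 0 means "not verified at creation"
    };

    // shaderSizes maps binding number -> minimum size the shader requires.
    std::vector<uint64_t> PackUnverifiedSizes(
        const std::vector<LayoutEntry>& layout,
        const std::map<uint32_t, uint64_t>& shaderSizes) {
        std::vector<uint64_t> packed;
        for (const LayoutEntry& entry : layout) {
            if (entry.minBufferBindingSize != 0) {
                continue;  // already validated against the layout
            }
            auto it = shaderSizes.find(entry.bindingNumber);
            // Bindings the shader never touches still occupy a slot, size 0.
            packed.push_back(it != shaderSizes.end() ? it->second : 0);
        }
        return packed;
    }
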
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
index b3fd83974b0..4e771aa8335 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
@@ -16,6 +16,7 @@
#define DAWNNATIVE_SHADERMODULE_H_
#include "common/Constants.h"
+#include "common/ityp_array.h"
#include "dawn_native/BindingInfo.h"
#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
@@ -27,7 +28,6 @@
#include "spvc/spvc.hpp"
-#include <array>
#include <bitset>
#include <map>
#include <vector>
@@ -43,6 +43,8 @@ namespace dawn_native {
class ShaderModuleBase : public CachedObject {
public:
+ enum class Type { Undefined, Spirv, Wgsl };
+
ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
~ShaderModuleBase() override;
@@ -61,8 +63,8 @@ namespace dawn_native {
using BindingInfo::visibility;
};
- using ModuleBindingInfo =
- std::array<std::map<BindingNumber, ShaderBindingInfo>, kMaxBindGroups>;
+ using BindingInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
+ using ModuleBindingInfo = ityp::array<BindGroupIndex, BindingInfoMap, kMaxBindGroups>;
const ModuleBindingInfo& GetBindingInfo() const;
const std::bitset<kMaxVertexAttributes>& GetUsedVertexAttributes() const;
@@ -73,7 +75,10 @@ namespace dawn_native {
using FragmentOutputBaseTypes = std::array<Format::Type, kMaxColorAttachments>;
const FragmentOutputBaseTypes& GetFragmentOutputBaseTypes() const;
- bool IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout) const;
+ MaybeError ValidateCompatibilityWithPipelineLayout(const PipelineLayoutBase* layout) const;
+
+ RequiredBufferSizes ComputeRequiredBufferSizesForLayout(
+ const PipelineLayoutBase* layout) const;
// Functors necessary for the unordered_set<ShaderModuleBase*>-based cache.
struct HashFunc {
@@ -89,20 +94,28 @@ namespace dawn_native {
protected:
static MaybeError CheckSpvcSuccess(shaderc_spvc_status status, const char* error_msg);
shaderc_spvc::CompileOptions GetCompileOptions() const;
+ MaybeError InitializeBase();
shaderc_spvc::Context mSpvcContext;
private:
ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- bool IsCompatibleWithBindGroupLayout(size_t group, const BindGroupLayoutBase* layout) const;
+ MaybeError ValidateCompatibilityWithBindGroupLayout(
+ BindGroupIndex group,
+ const BindGroupLayoutBase* layout) const;
+
+ std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingInfoMap& shaderMap,
+ const BindGroupLayoutBase* layout) const;
// Different implementations reflection into the shader depending on
// whether using spvc, or directly accessing spirv-cross.
MaybeError ExtractSpirvInfoWithSpvc();
MaybeError ExtractSpirvInfoWithSpirvCross(const spirv_cross::Compiler& compiler);
+ Type mType;
std::vector<uint32_t> mSpirv;
+ std::string mWgsl;
ModuleBindingInfo mBindingInfo;
std::bitset<kMaxVertexAttributes> mUsedVertexAttributes;
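
ModuleBindingInfo now uses ityp::array keyed by BindGroupIndex rather than a
plain std::array, so indexing with the wrong kind of integer (a BindingNumber,
say) no longer compiles. The idea in miniature, with a deliberately simplified
wrapper instead of Dawn's actual TypedInteger and ityp::array:

    #include <array>
    #include <cstddef>

    // A minimal strongly-typed index: two instantiations with different Tags
    // are unrelated types, so they cannot be mixed up.
    template <typename Tag>
    struct TypedIndex {
        size_t value;
    };

    template <typename Index, typename T, size_t N>
    struct TypedArray {
        T& operator[](Index i) { return data[i.value]; }
        std::array<T, N> data{};
    };

    struct GroupTag {};
    struct BindingTag {};
    using GroupIndex = TypedIndex<GroupTag>;
    using BindingIndex = TypedIndex<BindingTag>;

    int main() {
        TypedArray<GroupIndex, int, 4> perGroup;
        perGroup[GroupIndex{2}] = 7;       // ok
        // perGroup[BindingIndex{2}] = 7;  // would not compile: wrong index type
    }
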
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
index d90c9758741..6358567f4f2 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
@@ -102,7 +102,6 @@ namespace dawn_native {
desc.usage = swapChain->GetUsage();
desc.dimension = wgpu::TextureDimension::e2D;
desc.size = {swapChain->GetWidth(), swapChain->GetHeight(), 1};
- desc.arrayLayerCount = 1;
desc.format = swapChain->GetFormat();
desc.mipLevelCount = 1;
desc.sampleCount = 1;
@@ -181,7 +180,6 @@ namespace dawn_native {
descriptor.size.width = mWidth;
descriptor.size.height = mHeight;
descriptor.size.depth = 1;
- descriptor.arrayLayerCount = 1;
descriptor.sampleCount = 1;
descriptor.format = mFormat;
descriptor.mipLevelCount = 1;
@@ -201,7 +199,7 @@ namespace dawn_native {
}
ASSERT(!IsError());
- if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTexture.Get()))) {
+ if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTextureView.Get()))) {
return;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.h b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
index 39976119729..6e32e534048 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.h
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
@@ -68,7 +68,7 @@ namespace dawn_native {
~OldSwapChainBase() override;
const DawnSwapChainImplementation& GetImplementation();
virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
- virtual MaybeError OnBeforePresent(TextureBase* texture) = 0;
+ virtual MaybeError OnBeforePresent(TextureViewBase* view) = 0;
private:
MaybeError ValidateConfigure(wgpu::TextureFormat format,
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.cpp b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
index 1861c0c4f20..f029981fb04 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
@@ -101,8 +101,9 @@ namespace dawn_native {
        // Multisampled 2D array textures are not supported because on Metal they require
        // the macOS version to be greater than 10.14.
- if (descriptor->arrayLayerCount > 1) {
- return DAWN_VALIDATION_ERROR("Multisampled 2D array texture is not supported.");
+ if (descriptor->size.depth > 1) {
+ return DAWN_VALIDATION_ERROR(
+ "Multisampled textures with depth > 1 are not supported.");
}
if (format->isCompressed) {
@@ -163,7 +164,8 @@ namespace dawn_native {
"The size of the texture is incompatible with the texture format");
}
- if (descriptor->arrayLayerCount > kMaxTexture2DArrayLayers) {
+ if (descriptor->dimension == wgpu::TextureDimension::e2D &&
+ descriptor->size.depth > kMaxTexture2DArrayLayers) {
return DAWN_VALIDATION_ERROR("Texture 2D array layer count exceeded");
}
if (descriptor->mipLevelCount > kMaxTexture2DMipLevels) {
@@ -218,8 +220,7 @@ namespace dawn_native {
// TODO(jiawei.shao@intel.com): check stuff based on the dimension
if (descriptor->size.width == 0 || descriptor->size.height == 0 ||
- descriptor->size.depth == 0 || descriptor->arrayLayerCount == 0 ||
- descriptor->mipLevelCount == 0) {
+ descriptor->size.depth == 0 || descriptor->mipLevelCount == 0) {
return DAWN_VALIDATION_ERROR("Cannot create an empty texture");
}
@@ -326,6 +327,25 @@ namespace dawn_native {
return desc;
}
+ ResultOrError<TextureDescriptor> FixTextureDescriptor(DeviceBase* device,
+ const TextureDescriptor* desc) {
+ TextureDescriptor fixedDesc = *desc;
+
+ if (desc->arrayLayerCount != 1) {
+ if (desc->size.depth != 1) {
+ return DAWN_VALIDATION_ERROR("arrayLayerCount and size.depth cannot both be != 1");
+ } else {
+ fixedDesc.size.depth = fixedDesc.arrayLayerCount;
+ fixedDesc.arrayLayerCount = 1;
+ device->EmitDeprecationWarning(
+ "wgpu::TextureDescriptor::arrayLayerCount is deprecated in favor of "
+ "::size::depth");
+ }
+ }
+
+ return {std::move(fixedDesc)};
+ }
+
bool IsValidSampleCount(uint32_t sampleCount) {
switch (sampleCount) {
case 1:
@@ -337,6 +357,12 @@ namespace dawn_native {
}
}
+ // static
+ SubresourceRange SubresourceRange::SingleSubresource(uint32_t baseMipLevel,
+ uint32_t baseArrayLayer) {
+ return {baseMipLevel, 1, baseArrayLayer, 1};
+ }
+
// TextureBase
TextureBase::TextureBase(DeviceBase* device,
@@ -346,13 +372,11 @@ namespace dawn_native {
mDimension(descriptor->dimension),
mFormat(device->GetValidInternalFormat(descriptor->format)),
mSize(descriptor->size),
- mArrayLayerCount(descriptor->arrayLayerCount),
mMipLevelCount(descriptor->mipLevelCount),
mSampleCount(descriptor->sampleCount),
mUsage(descriptor->usage),
mState(state) {
- uint32_t subresourceCount =
- GetSubresourceIndex(descriptor->mipLevelCount, descriptor->arrayLayerCount);
+ uint32_t subresourceCount = GetSubresourceCount();
mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
// Add readonly storage usage if the texture has a storage usage. The validation rules in
@@ -387,18 +411,43 @@ namespace dawn_native {
ASSERT(!IsError());
return mSize;
}
+ uint32_t TextureBase::GetWidth() const {
+ ASSERT(!IsError());
+ return mSize.width;
+ }
+ uint32_t TextureBase::GetHeight() const {
+ ASSERT(!IsError());
+ ASSERT(mDimension == wgpu::TextureDimension::e2D ||
+ mDimension == wgpu::TextureDimension::e3D);
+ return mSize.height;
+ }
+ uint32_t TextureBase::GetDepth() const {
+ ASSERT(!IsError());
+ ASSERT(mDimension == wgpu::TextureDimension::e3D);
+ return mSize.depth;
+ }
uint32_t TextureBase::GetArrayLayers() const {
ASSERT(!IsError());
- return mArrayLayerCount;
+ // TODO(cwallez@chromium.org): Update for 1D / 3D textures when they are supported.
+ ASSERT(mDimension == wgpu::TextureDimension::e2D);
+ return mSize.depth;
}
uint32_t TextureBase::GetNumMipLevels() const {
ASSERT(!IsError());
return mMipLevelCount;
}
+ SubresourceRange TextureBase::GetAllSubresources() const {
+ ASSERT(!IsError());
+ return {0, mMipLevelCount, 0, GetArrayLayers()};
+ }
uint32_t TextureBase::GetSampleCount() const {
ASSERT(!IsError());
return mSampleCount;
}
+ uint32_t TextureBase::GetSubresourceCount() const {
+ ASSERT(!IsError());
+ return mMipLevelCount * mSize.depth;
+ }
wgpu::TextureUsage TextureBase::GetUsage() const {
ASSERT(!IsError());
return mUsage;
@@ -418,14 +467,12 @@ namespace dawn_native {
return GetNumMipLevels() * arraySlice + mipLevel;
}
- bool TextureBase::IsSubresourceContentInitialized(uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount) const {
+ bool TextureBase::IsSubresourceContentInitialized(const SubresourceRange& range) const {
ASSERT(!IsError());
- for (uint32_t mipLevel = baseMipLevel; mipLevel < baseMipLevel + levelCount; ++mipLevel) {
- for (uint32_t arrayLayer = baseArrayLayer; arrayLayer < baseArrayLayer + layerCount;
- ++arrayLayer) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = range.baseMipLevel;
+ mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer);
ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
@@ -437,14 +484,12 @@ namespace dawn_native {
}
void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount) {
+ const SubresourceRange& range) {
ASSERT(!IsError());
- for (uint32_t mipLevel = baseMipLevel; mipLevel < baseMipLevel + levelCount; ++mipLevel) {
- for (uint32_t arrayLayer = baseArrayLayer; arrayLayer < baseArrayLayer + layerCount;
- ++arrayLayer) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = range.baseMipLevel;
+ mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer);
ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
@@ -466,9 +511,16 @@ namespace dawn_native {
}
Extent3D TextureBase::GetMipLevelVirtualSize(uint32_t level) const {
- Extent3D extent;
- extent.width = std::max(mSize.width >> level, 1u);
+ Extent3D extent = {std::max(mSize.width >> level, 1u), 1u, 1u};
+ if (mDimension == wgpu::TextureDimension::e1D) {
+ return extent;
+ }
+
extent.height = std::max(mSize.height >> level, 1u);
+ if (mDimension == wgpu::TextureDimension::e2D) {
+ return extent;
+ }
+
extent.depth = std::max(mSize.depth >> level, 1u);
return extent;
}
@@ -521,10 +573,8 @@ namespace dawn_native {
mTexture(texture),
mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
mDimension(descriptor->dimension),
- mBaseMipLevel(descriptor->baseMipLevel),
- mMipLevelCount(descriptor->mipLevelCount),
- mBaseArrayLayer(descriptor->baseArrayLayer),
- mArrayLayerCount(descriptor->arrayLayerCount) {
+ mRange({descriptor->baseMipLevel, descriptor->mipLevelCount, descriptor->baseArrayLayer,
+ descriptor->arrayLayerCount}) {
}
TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -558,21 +608,26 @@ namespace dawn_native {
uint32_t TextureViewBase::GetBaseMipLevel() const {
ASSERT(!IsError());
- return mBaseMipLevel;
+ return mRange.baseMipLevel;
}
uint32_t TextureViewBase::GetLevelCount() const {
ASSERT(!IsError());
- return mMipLevelCount;
+ return mRange.levelCount;
}
uint32_t TextureViewBase::GetBaseArrayLayer() const {
ASSERT(!IsError());
- return mBaseArrayLayer;
+ return mRange.baseArrayLayer;
}
uint32_t TextureViewBase::GetLayerCount() const {
ASSERT(!IsError());
- return mArrayLayerCount;
+ return mRange.layerCount;
+ }
+
+ const SubresourceRange& TextureViewBase::GetSubresourceRange() const {
+ ASSERT(!IsError());
+ return mRange;
}
} // namespace dawn_native
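
GetMipLevelVirtualSize halves each dimension per mip level and clamps at one,
and after this change returns early for 1D and 2D textures so the unused
dimensions stay at 1. The arithmetic on its own, with made-up sample values:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    uint32_t MipDimension(uint32_t base, uint32_t level) {
        // Each mip level halves the dimension; it never drops below 1.
        return std::max(base >> level, 1u);
    }

    int main() {
        // A 256x100 2D texture: level 3 is 32x12, level 7 is 2x1, level 9 is 1x1.
        for (uint32_t level : {3u, 7u, 9u}) {
            std::printf("level %u: %ux%u\n", level,
                        MipDimension(256, level), MipDimension(100, level));
        }
    }
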
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.h b/chromium/third_party/dawn/src/dawn_native/Texture.h
index d14696cd7ac..29007941344 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.h
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.h
@@ -32,6 +32,11 @@ namespace dawn_native {
const TextureBase* texture,
const TextureViewDescriptor* descriptor);
+ // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
+ // GPUTextureDescriptor.size.depth is done.
+ ResultOrError<TextureDescriptor> FixTextureDescriptor(DeviceBase* device,
+ const TextureDescriptor* desc);
+
bool IsValidSampleCount(uint32_t sampleCount);
static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
@@ -41,6 +46,15 @@ namespace dawn_native {
wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::Storage |
wgpu::TextureUsage::OutputAttachment;
+ struct SubresourceRange {
+ uint32_t baseMipLevel;
+ uint32_t levelCount;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+
+ static SubresourceRange SingleSubresource(uint32_t baseMipLevel, uint32_t baseArrayLayer);
+ };
+
class TextureBase : public ObjectBase {
public:
enum class TextureState { OwnedInternal, OwnedExternal, Destroyed };
@@ -52,21 +66,19 @@ namespace dawn_native {
wgpu::TextureDimension GetDimension() const;
const Format& GetFormat() const;
const Extent3D& GetSize() const;
+ uint32_t GetWidth() const;
+ uint32_t GetHeight() const;
+ uint32_t GetDepth() const;
uint32_t GetArrayLayers() const;
uint32_t GetNumMipLevels() const;
+ SubresourceRange GetAllSubresources() const;
uint32_t GetSampleCount() const;
+ uint32_t GetSubresourceCount() const;
wgpu::TextureUsage GetUsage() const;
TextureState GetTextureState() const;
uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice) const;
- bool IsSubresourceContentInitialized(uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount) const;
- void SetIsSubresourceContentInitialized(bool isInitialized,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount);
+ bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
+ void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
MaybeError ValidateCanUseInSubmitNow() const;
@@ -96,7 +108,6 @@ namespace dawn_native {
// TODO(cwallez@chromium.org): This should be deduplicated in the Device
const Format& mFormat;
Extent3D mSize;
- uint32_t mArrayLayerCount;
uint32_t mMipLevelCount;
uint32_t mSampleCount;
wgpu::TextureUsage mUsage = wgpu::TextureUsage::None;
@@ -121,6 +132,7 @@ namespace dawn_native {
uint32_t GetLevelCount() const;
uint32_t GetBaseArrayLayer() const;
uint32_t GetLayerCount() const;
+ const SubresourceRange& GetSubresourceRange() const;
private:
TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag);
@@ -130,10 +142,7 @@ namespace dawn_native {
// TODO(cwallez@chromium.org): This should be deduplicated in the Device
const Format& mFormat;
wgpu::TextureViewDimension mDimension;
- uint32_t mBaseMipLevel;
- uint32_t mMipLevelCount;
- uint32_t mBaseArrayLayer;
- uint32_t mArrayLayerCount;
+ SubresourceRange mRange;
};
} // namespace dawn_native
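
SubresourceRange bundles the four values that previously traveled as separate
parameters; the texture still tracks initialization in one flat vector indexed
as mipLevelCount * arrayLayer + mipLevel, matching GetSubresourceIndex. A
sketch of walking a range over that layout, with types simplified from the
ones above:

    #include <cstdint>
    #include <vector>

    struct Range {
        uint32_t baseMipLevel, levelCount;
        uint32_t baseArrayLayer, layerCount;
    };

    // Flat index: all mips of layer 0, then all mips of layer 1, and so on.
    uint32_t SubresourceIndex(uint32_t mipLevelCount, uint32_t mip, uint32_t layer) {
        return mipLevelCount * layer + mip;
    }

    bool AllInitialized(const std::vector<bool>& initialized,
                        uint32_t mipLevelCount,
                        const Range& r) {
        for (uint32_t layer = r.baseArrayLayer;
             layer < r.baseArrayLayer + r.layerCount; ++layer) {
            for (uint32_t mip = r.baseMipLevel;
                 mip < r.baseMipLevel + r.levelCount; ++mip) {
                if (!initialized[SubresourceIndex(mipLevelCount, mip, layer)]) {
                    return false;
                }
            }
        }
        return true;
    }
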
diff --git a/chromium/third_party/dawn/src/dawn_native/ToBackend.h b/chromium/third_party/dawn/src/dawn_native/ToBackend.h
index b9940aba2b6..3cc071580c2 100644
--- a/chromium/third_party/dawn/src/dawn_native/ToBackend.h
+++ b/chromium/third_party/dawn/src/dawn_native/ToBackend.h
@@ -64,6 +64,11 @@ namespace dawn_native {
};
template <typename BackendTraits>
+ struct ToBackendTraits<QuerySetBase, BackendTraits> {
+ using BackendType = typename BackendTraits::QuerySetType;
+ };
+
+ template <typename BackendTraits>
struct ToBackendTraits<QueueBase, BackendTraits> {
using BackendType = typename BackendTraits::QueueType;
};
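
ToBackendTraits maps each frontend type to its backend counterpart through
template specialization, which is how the new QuerySetBase entry plugs in. The
mechanism reduced to two types; the trait and type names here are illustrative
stand-ins, not the real backend classes:

    #include <type_traits>

    // Primary template is left undefined; only specializations are usable.
    template <typename FrontendT, typename BackendTraits>
    struct ToBackendTraits;

    struct SketchTraits {
        struct QueueType {};
        struct QuerySetType {};
    };

    struct QueueBase {};
    struct QuerySetBase {};

    template <typename BackendTraits>
    struct ToBackendTraits<QueueBase, BackendTraits> {
        using BackendType = typename BackendTraits::QueueType;
    };

    template <typename BackendTraits>
    struct ToBackendTraits<QuerySetBase, BackendTraits> {
        using BackendType = typename BackendTraits::QuerySetType;
    };

    // A ToBackend() helper can then static_cast a QuerySetBase* to the
    // backend type resolved here.
    static_assert(std::is_same<ToBackendTraits<QuerySetBase, SketchTraits>::BackendType,
                               SketchTraits::QuerySetType>::value,
                  "QuerySetBase maps to the backend's QuerySetType");
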
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
index 8e665d0cf1e..68fa27c2e5e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
@@ -15,7 +15,9 @@
#include "dawn_native/d3d12/AdapterD3D12.h"
#include "common/Constants.h"
+#include "dawn_native/Instance.h"
#include "dawn_native/d3d12/BackendD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
@@ -40,6 +42,10 @@ namespace dawn_native { namespace d3d12 {
mBackend(backend) {
}
+ Adapter::~Adapter() {
+ CleanUpDebugLayerFilters();
+ }
+
const D3D12DeviceInfo& Adapter::GetDeviceInfo() const {
return mDeviceInfo;
}
@@ -66,6 +72,8 @@ namespace dawn_native { namespace d3d12 {
return DAWN_INTERNAL_ERROR("D3D12CreateDevice failed");
}
+ DAWN_TRY(InitializeDebugLayerFilters());
+
DXGI_ADAPTER_DESC1 adapterDesc;
mHardwareAdapter->GetDesc1(&adapterDesc);
@@ -92,6 +100,84 @@ namespace dawn_native { namespace d3d12 {
void Adapter::InitializeSupportedExtensions() {
mSupportedExtensions.EnableExtension(Extension::TextureCompressionBC);
+ mSupportedExtensions.EnableExtension(Extension::PipelineStatisticsQuery);
+ mSupportedExtensions.EnableExtension(Extension::TimestampQuery);
+ }
+
+ MaybeError Adapter::InitializeDebugLayerFilters() {
+ if (!GetInstance()->IsBackendValidationEnabled()) {
+ return {};
+ }
+
+ D3D12_MESSAGE_ID denyIds[] = {
+
+ //
+ // Permanent IDs: list of warnings that are not applicable
+ //
+
+ // Resource sub-allocation partially maps pre-allocated heaps. This means the
+            // entire physical address space may have no resources or have many resources
+            // assigned to the same heap.
+ D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_HAS_NO_RESOURCE,
+ D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_INTERSECTS_MULTIPLE_BUFFERS,
+
+ // The debug layer validates pipeline objects when they are created. Dawn validates
+            // them when they are set. Therefore, since the issue is caught at a later
+            // time, we can silence these warnings.
+ D3D12_MESSAGE_ID_CREATEGRAPHICSPIPELINESTATE_RENDERTARGETVIEW_NOT_SET,
+
+ // Adding a clear color during resource creation would require heuristics or delayed
+ // creation.
+ // https://crbug.com/dawn/418
+ D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE,
+ D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE,
+
+ // Dawn enforces proper Unmaps at a later time.
+ // https://crbug.com/dawn/422
+ D3D12_MESSAGE_ID_EXECUTECOMMANDLISTS_GPU_WRITTEN_READBACK_RESOURCE_MAPPED,
+
+ //
+ // Temporary IDs: list of warnings that should be fixed or promoted
+ //
+
+            // Remove after the warnings have been addressed
+ // https://crbug.com/dawn/419
+ D3D12_MESSAGE_ID_UNMAP_RANGE_NOT_EMPTY,
+
+            // Remove after the warnings have been addressed
+ // https://crbug.com/dawn/421
+ D3D12_MESSAGE_ID_GPU_BASED_VALIDATION_INCOMPATIBLE_RESOURCE_STATE,
+ };
+
+ // Create a retrieval filter with a deny list to suppress messages.
+ // Any messages remaining will be converted to Dawn errors.
+ D3D12_INFO_QUEUE_FILTER filter{};
+ // Filter out info/message and only create errors from warnings or worse.
+ D3D12_MESSAGE_SEVERITY severities[] = {
+ D3D12_MESSAGE_SEVERITY_INFO,
+ D3D12_MESSAGE_SEVERITY_MESSAGE,
+ };
+ filter.DenyList.NumSeverities = ARRAYSIZE(severities);
+ filter.DenyList.pSeverityList = severities;
+ filter.DenyList.NumIDs = ARRAYSIZE(denyIds);
+ filter.DenyList.pIDList = denyIds;
+
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ ASSERT_SUCCESS(mD3d12Device.As(&infoQueue));
+
+ DAWN_TRY(CheckHRESULT(infoQueue->PushRetrievalFilter(&filter),
+ "ID3D12InfoQueue::PushRetrievalFilter"));
+
+ return {};
+ }
+
+ void Adapter::CleanUpDebugLayerFilters() {
+ if (!GetInstance()->IsBackendValidationEnabled()) {
+ return;
+ }
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ ASSERT_SUCCESS(mD3d12Device.As(&infoQueue));
+ infoQueue->PopRetrievalFilter();
}
ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
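
InitializeDebugLayerFilters and CleanUpDebugLayerFilters pair
ID3D12InfoQueue::PushRetrievalFilter with PopRetrievalFilter across the
adapter's lifetime. The same pairing can be expressed as a small RAII guard;
this wrapper is a hypothetical sketch, not something Dawn defines, though the
ID3D12InfoQueue calls are the real API:

    #include <d3d12.h>
    #include <utility>
    #include <wrl/client.h>

    using Microsoft::WRL::ComPtr;

    // Pushes a retrieval filter for its lifetime and pops it on destruction,
    // so the pop cannot be forgotten on early-exit paths.
    class ScopedRetrievalFilter {
      public:
        ScopedRetrievalFilter(ComPtr<ID3D12InfoQueue> queue,
                              D3D12_INFO_QUEUE_FILTER* filter)
            : mQueue(std::move(queue)) {
            mPushed = SUCCEEDED(mQueue->PushRetrievalFilter(filter));
        }
        ~ScopedRetrievalFilter() {
            if (mPushed) {
                mQueue->PopRetrievalFilter();
            }
        }

      private:
        ComPtr<ID3D12InfoQueue> mQueue;
        bool mPushed = false;
    };
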
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
index 48345c11006..e0910bffa55 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
@@ -27,7 +27,7 @@ namespace dawn_native { namespace d3d12 {
class Adapter : public AdapterBase {
public:
Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter);
- ~Adapter() override = default;
+ ~Adapter() override;
const D3D12DeviceInfo& GetDeviceInfo() const;
IDXGIAdapter3* GetHardwareAdapter() const;
@@ -39,6 +39,8 @@ namespace dawn_native { namespace d3d12 {
private:
ResultOrError<DeviceBase*> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
void InitializeSupportedExtensions();
+ MaybeError InitializeDebugLayerFilters();
+ void CleanUpDebugLayerFilters();
ComPtr<IDXGIAdapter3> mHardwareAdapter;
ComPtr<ID3D12Device> mD3d12Device;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
index eea97206263..46dc1b67921 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
@@ -17,6 +17,7 @@
#include "dawn_native/D3D12Backend.h"
#include "dawn_native/Instance.h"
#include "dawn_native/d3d12/AdapterD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
namespace dawn_native { namespace d3d12 {
@@ -33,11 +34,12 @@ namespace dawn_native { namespace d3d12 {
// Enable the debug layer (requires the Graphics Tools "optional feature").
{
if (enableBackendValidation) {
- ComPtr<ID3D12Debug> debugController;
+ ComPtr<ID3D12Debug1> debugController;
if (SUCCEEDED(
functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
ASSERT(debugController != nullptr);
debugController->EnableDebugLayer();
+ debugController->SetEnableGPUBasedValidation(true);
// Enable additional debug layers.
dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
@@ -91,6 +93,26 @@ namespace dawn_native { namespace d3d12 {
return mFactory;
}
+ ResultOrError<IDxcLibrary*> Backend::GetOrCreateDxcLibrary() {
+ if (mDxcLibrary == nullptr) {
+ DAWN_TRY(CheckHRESULT(
+ mFunctions->dxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&mDxcLibrary)),
+ "DXC create library"));
+ ASSERT(mDxcLibrary != nullptr);
+ }
+ return mDxcLibrary.Get();
+ }
+
+ ResultOrError<IDxcCompiler*> Backend::GetOrCreateDxcCompiler() {
+ if (mDxcCompiler == nullptr) {
+ DAWN_TRY(CheckHRESULT(
+ mFunctions->dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&mDxcCompiler)),
+ "DXC create compiler"));
+ ASSERT(mDxcCompiler != nullptr);
+ }
+ return mDxcCompiler.Get();
+ }
+
const PlatformFunctions* Backend::GetFunctions() const {
return mFunctions.get();
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
index 3161048a088..27ef1d16a36 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
@@ -30,6 +30,8 @@ namespace dawn_native { namespace d3d12 {
MaybeError Initialize();
ComPtr<IDXGIFactory4> GetFactory() const;
+ ResultOrError<IDxcLibrary*> GetOrCreateDxcLibrary();
+ ResultOrError<IDxcCompiler*> GetOrCreateDxcCompiler();
const PlatformFunctions* GetFunctions() const;
std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override;
@@ -39,6 +41,8 @@ namespace dawn_native { namespace d3d12 {
// the D3D12 DLLs are unloaded before we are done using them.
std::unique_ptr<PlatformFunctions> mFunctions;
ComPtr<IDXGIFactory4> mFactory;
+ ComPtr<IDxcLibrary> mDxcLibrary;
+ ComPtr<IDxcCompiler> mDxcCompiler;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
index 5aeaf567c06..76fb0289796 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
@@ -18,7 +18,7 @@
#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
#include "dawn_native/d3d12/BufferD3D12.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/SamplerD3D12.h"
+#include "dawn_native/d3d12/SamplerHeapCacheD3D12.h"
#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/TextureD3D12.h"
@@ -33,14 +33,11 @@ namespace dawn_native { namespace d3d12 {
BindGroup::BindGroup(Device* device,
const BindGroupDescriptor* descriptor,
uint32_t viewSizeIncrement,
- const CPUDescriptorHeapAllocation& viewAllocation,
- uint32_t samplerSizeIncrement,
- const CPUDescriptorHeapAllocation& samplerAllocation)
+ const CPUDescriptorHeapAllocation& viewAllocation)
: BindGroupBase(this, device, descriptor) {
BindGroupLayout* bgl = ToBackend(GetLayout());
mCPUViewAllocation = viewAllocation;
- mCPUSamplerAllocation = samplerAllocation;
const auto& bindingOffsets = bgl->GetBindingOffsets();
@@ -129,11 +126,7 @@ namespace dawn_native { namespace d3d12 {
}
case wgpu::BindingType::Sampler:
case wgpu::BindingType::ComparisonSampler: {
- auto* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
- auto& samplerDesc = sampler->GetSamplerDescriptor();
- d3d12Device->CreateSampler(
- &samplerDesc, samplerAllocation.OffsetFrom(samplerSizeIncrement,
- bindingOffsets[bindingIndex]));
+ // No-op as samplers will be later initialized by CreateSamplers().
break;
}
@@ -156,32 +149,15 @@ namespace dawn_native { namespace d3d12 {
}
BindGroup::~BindGroup() {
- ToBackend(GetLayout())
- ->DeallocateBindGroup(this, &mCPUViewAllocation, &mCPUSamplerAllocation);
+ ToBackend(GetLayout())->DeallocateBindGroup(this, &mCPUViewAllocation);
ASSERT(!mCPUViewAllocation.IsValid());
- ASSERT(!mCPUSamplerAllocation.IsValid());
}
bool BindGroup::PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator) {
const BindGroupLayout* bgl = ToBackend(GetLayout());
- return Populate(viewAllocator, bgl->GetCbvUavSrvDescriptorCount(),
- D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, mCPUViewAllocation,
- &mGPUViewAllocation);
- }
- bool BindGroup::PopulateSamplers(ShaderVisibleDescriptorAllocator* samplerAllocator) {
- const BindGroupLayout* bgl = ToBackend(GetLayout());
- return Populate(samplerAllocator, bgl->GetSamplerDescriptorCount(),
- D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, mCPUSamplerAllocation,
- &mGPUSamplerAllocation);
- }
-
- bool BindGroup::Populate(ShaderVisibleDescriptorAllocator* allocator,
- uint32_t descriptorCount,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType,
- const CPUDescriptorHeapAllocation& stagingAllocation,
- GPUDescriptorHeapAllocation* allocation) {
- if (descriptorCount == 0 || allocator->IsAllocationStillValid(*allocation)) {
+ const uint32_t descriptorCount = bgl->GetCbvUavSrvDescriptorCount();
+ if (descriptorCount == 0 || viewAllocator->IsAllocationStillValid(mGPUViewAllocation)) {
return true;
}
@@ -190,16 +166,18 @@ namespace dawn_native { namespace d3d12 {
Device* device = ToBackend(GetDevice());
D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
- if (!allocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
- &baseCPUDescriptor, allocation)) {
+ if (!viewAllocator->AllocateGPUDescriptors(descriptorCount,
+ device->GetPendingCommandSerial(),
+ &baseCPUDescriptor, &mGPUViewAllocation)) {
return false;
}
// CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
// simple copies per bindgroup, a single non-simple copy could be issued.
// TODO(dawn:155): Consider doing this optimization.
- device->GetD3D12Device()->CopyDescriptorsSimple(
- descriptorCount, baseCPUDescriptor, stagingAllocation.GetBaseDescriptor(), heapType);
+ device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
+ mCPUViewAllocation.GetBaseDescriptor(),
+ D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
return true;
}
@@ -209,6 +187,19 @@ namespace dawn_native { namespace d3d12 {
}
D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
- return mGPUSamplerAllocation.GetBaseDescriptor();
+ ASSERT(mSamplerAllocationEntry.Get() != nullptr);
+ return mSamplerAllocationEntry->GetBaseDescriptor();
+ }
+
+ bool BindGroup::PopulateSamplers(Device* device,
+ ShaderVisibleDescriptorAllocator* samplerAllocator) {
+ if (mSamplerAllocationEntry.Get() == nullptr) {
+ return true;
+ }
+ return mSamplerAllocationEntry->Populate(device, samplerAllocator);
+ }
+
+ void BindGroup::SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry) {
+ mSamplerAllocationEntry = std::move(entry);
}
}} // namespace dawn_native::d3d12
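
The sampler rewrite replaces per-bind-group sampler allocations with
ref-counted SamplerHeapCacheEntry objects, so bind groups that use identical
samplers share one descriptor allocation. A sketch of that caching idea using
std::shared_ptr/weak_ptr in place of Dawn's Ref type; the key and entry types
are simplified assumptions:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <vector>

    // Key: the ordered list of sampler ids a bind group uses.
    using SamplerSetKey = std::vector<uint32_t>;

    struct CacheEntry {
        // In the real backend this would own a descriptor-heap allocation
        // that many bind groups with identical samplers can share.
        uint32_t heapOffset = 0;
    };

    class SamplerSetCache {
      public:
        std::shared_ptr<CacheEntry> GetOrCreate(const SamplerSetKey& key) {
            auto it = mEntries.find(key);
            if (it != mEntries.end()) {
                if (auto existing = it->second.lock()) {
                    return existing;  // reuse the shared allocation
                }
            }
            auto entry = std::make_shared<CacheEntry>();
            mEntries[key] = entry;  // weak_ptr: entries die with their last user
            return entry;
        }

      private:
        std::map<SamplerSetKey, std::weak_ptr<CacheEntry>> mEntries;
    };
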
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
index 05d67b4ba3b..54acb3de38e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
@@ -24,7 +24,9 @@
namespace dawn_native { namespace d3d12 {
class Device;
+ class SamplerHeapCacheEntry;
class ShaderVisibleDescriptorAllocator;
+ class StagingDescriptorAllocator;
class BindGroup final : public BindGroupBase, public PlacementAllocated {
public:
@@ -34,30 +36,23 @@ namespace dawn_native { namespace d3d12 {
BindGroup(Device* device,
const BindGroupDescriptor* descriptor,
uint32_t viewSizeIncrement,
- const CPUDescriptorHeapAllocation& viewAllocation,
- uint32_t samplerSizeIncrement,
- const CPUDescriptorHeapAllocation& samplerAllocation);
+ const CPUDescriptorHeapAllocation& viewAllocation);
// Returns true if the BindGroup was successfully populated.
bool PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator);
- bool PopulateSamplers(ShaderVisibleDescriptorAllocator* samplerAllocator);
+ bool PopulateSamplers(Device* device, ShaderVisibleDescriptorAllocator* samplerAllocator);
D3D12_GPU_DESCRIPTOR_HANDLE GetBaseViewDescriptor() const;
D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
- private:
- bool Populate(ShaderVisibleDescriptorAllocator* allocator,
- uint32_t descriptorCount,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType,
- const CPUDescriptorHeapAllocation& stagingAllocation,
- GPUDescriptorHeapAllocation* allocation);
+ void SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry);
+ private:
~BindGroup() override;
- GPUDescriptorHeapAllocation mGPUSamplerAllocation;
- GPUDescriptorHeapAllocation mGPUViewAllocation;
+ Ref<SamplerHeapCacheEntry> mSamplerAllocationEntry;
- CPUDescriptorHeapAllocation mCPUSamplerAllocation;
+ GPUDescriptorHeapAllocation mGPUViewAllocation;
CPUDescriptorHeapAllocation mCPUViewAllocation;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
index c5c799942e7..9280f8c5f4f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
@@ -17,6 +17,7 @@
#include "common/BitSetIterator.h"
#include "dawn_native/d3d12/BindGroupD3D12.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/SamplerHeapCacheD3D12.h"
#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -102,7 +103,7 @@ namespace dawn_native { namespace d3d12 {
D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER);
descriptorOffsets[Sampler] = 0;
- for (BindingIndex bindingIndex = 0; bindingIndex < GetBindingCount(); ++bindingIndex) {
+ for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
if (bindingInfo.hasDynamicOffset) {
@@ -147,32 +148,30 @@ namespace dawn_native { namespace d3d12 {
viewSizeIncrement = mViewAllocator->GetSizeIncrement();
}
- uint32_t samplerSizeIncrement = 0;
- CPUDescriptorHeapAllocation samplerAllocation;
+ Ref<BindGroup> bindGroup = AcquireRef<BindGroup>(
+ mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation));
+
if (GetSamplerDescriptorCount() > 0) {
- DAWN_TRY_ASSIGN(samplerAllocation, mSamplerAllocator->AllocateCPUDescriptors());
- samplerSizeIncrement = mSamplerAllocator->GetSizeIncrement();
+ Ref<SamplerHeapCacheEntry> samplerHeapCacheEntry;
+ DAWN_TRY_ASSIGN(samplerHeapCacheEntry, device->GetSamplerHeapCache()->GetOrCreate(
+ bindGroup.Get(), mSamplerAllocator));
+ bindGroup->SetSamplerAllocationEntry(std::move(samplerHeapCacheEntry));
}
- return mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation,
- samplerSizeIncrement, samplerAllocation);
+ return bindGroup.Detach();
}
void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
- CPUDescriptorHeapAllocation* viewAllocation,
- CPUDescriptorHeapAllocation* samplerAllocation) {
+ CPUDescriptorHeapAllocation* viewAllocation) {
if (viewAllocation->IsValid()) {
mViewAllocator->Deallocate(viewAllocation);
}
- if (samplerAllocation->IsValid()) {
- mSamplerAllocator->Deallocate(samplerAllocation);
- }
-
mBindGroupAllocator.Deallocate(bindGroup);
}
- const std::array<uint32_t, kMaxBindingsPerGroup>& BindGroupLayout::GetBindingOffsets() const {
+ const ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup>&
+ BindGroupLayout::GetBindingOffsets() const {
return mBindingOffsets;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
index d04ab75039b..5a5ba8bce33 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
@@ -25,6 +25,7 @@ namespace dawn_native { namespace d3d12 {
class BindGroup;
class CPUDescriptorHeapAllocation;
class Device;
+ class SamplerHeapCacheEntry;
class StagingDescriptorAllocator;
class BindGroupLayout final : public BindGroupLayoutBase {
@@ -33,9 +34,7 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<BindGroup*> AllocateBindGroup(Device* device,
const BindGroupDescriptor* descriptor);
- void DeallocateBindGroup(BindGroup* bindGroup,
- CPUDescriptorHeapAllocation* viewAllocation,
- CPUDescriptorHeapAllocation* samplerAllocation);
+ void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
enum DescriptorType {
CBV,
@@ -45,7 +44,7 @@ namespace dawn_native { namespace d3d12 {
Count,
};
- const std::array<uint32_t, kMaxBindingsPerGroup>& GetBindingOffsets() const;
+ const ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup>& GetBindingOffsets() const;
uint32_t GetCbvUavSrvDescriptorTableSize() const;
uint32_t GetSamplerDescriptorTableSize() const;
uint32_t GetCbvUavSrvDescriptorCount() const;
@@ -55,7 +54,7 @@ namespace dawn_native { namespace d3d12 {
private:
~BindGroupLayout() override = default;
- std::array<uint32_t, kMaxBindingsPerGroup> mBindingOffsets;
+ ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup> mBindingOffsets;
std::array<uint32_t, DescriptorType::Count> mDescriptorCounts;
D3D12_DESCRIPTOR_RANGE mRanges[DescriptorType::Count];
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
index fa68ef7ac27..879709f4bae 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "common/Constants.h"
#include "common/Math.h"
+#include "dawn_native/DynamicUploader.h"
#include "dawn_native/d3d12/CommandRecordingContext.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
@@ -120,6 +121,11 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY_ASSIGN(
mResourceAllocation,
ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
+
+ if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ DAWN_TRY(ClearBuffer(ClearValue::NonZero));
+ }
+
return {};
}
@@ -233,76 +239,71 @@ namespace dawn_native { namespace d3d12 {
return mResourceAllocation.GetGPUPointer();
}
- void Buffer::OnMapCommandSerialFinished(uint32_t mapSerial, void* data, bool isWrite) {
- if (isWrite) {
- CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
- } else {
- CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
- }
- }
-
bool Buffer::IsMapWritable() const {
// TODO(enga): Handle CPU-visible memory on UMA
return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
+ MaybeError Buffer::MapBufferInternal(D3D12_RANGE mappedRange,
+ void** mappedPointer,
+ const char* contextInfo) {
// The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
// evicted. This buffer should already have been made resident when it was created.
Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockHeap(heap));
+ DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
- mWrittenMappedRange = {0, static_cast<size_t>(GetSize())};
- DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &mWrittenMappedRange,
- reinterpret_cast<void**>(mappedPointer)),
- "D3D12 map at creation"));
+ DAWN_TRY(
+ CheckHRESULT(GetD3D12Resource()->Map(0, &mappedRange, mappedPointer), contextInfo));
return {};
}
- MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
- // The mapped buffer can be accessed at any time, so we must make the buffer resident and
- // lock it to ensure it is never evicted.
+ void Buffer::UnmapBufferInternal(D3D12_RANGE mappedRange) {
+ GetD3D12Resource()->Unmap(0, &mappedRange);
+
+ // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
+ // them when they are unmapped.
Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockHeap(heap));
+ ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
+ }
+
+ MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
+ mWrittenMappedRange = {0, static_cast<size_t>(GetSize())};
+ DAWN_TRY(MapBufferInternal(mWrittenMappedRange, reinterpret_cast<void**>(mappedPointer),
+ "D3D12 map at creation"));
+ mMappedData = reinterpret_cast<char*>(mappedPointer);
+ return {};
+ }
+ MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
mWrittenMappedRange = {};
D3D12_RANGE readRange = {0, static_cast<size_t>(GetSize())};
- char* data = nullptr;
- DAWN_TRY(
- CheckHRESULT(GetD3D12Resource()->Map(0, &readRange, reinterpret_cast<void**>(&data)),
- "D3D12 map read async"));
+ DAWN_TRY(MapBufferInternal(readRange, reinterpret_cast<void**>(&mMappedData),
+ "D3D12 map read async"));
+
// There is no need to transition the resource to a new state: D3D12 seems to make the GPU
// writes available when the fence is passed.
- MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapRequestTracker();
- tracker->Track(this, serial, data, false);
return {};
}
MaybeError Buffer::MapWriteAsyncImpl(uint32_t serial) {
- // The mapped buffer can be accessed at any time, so we must make the buffer resident and
- // lock it to ensure it is never evicted.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockHeap(heap));
-
mWrittenMappedRange = {0, static_cast<size_t>(GetSize())};
- char* data = nullptr;
- DAWN_TRY(CheckHRESULT(
- GetD3D12Resource()->Map(0, &mWrittenMappedRange, reinterpret_cast<void**>(&data)),
- "D3D12 map write async"));
+ DAWN_TRY(MapBufferInternal(mWrittenMappedRange, reinterpret_cast<void**>(&mMappedData),
+ "D3D12 map write async"));
+
// There is no need to transition the resource to a new state: D3D12 seems to make the CPU
// writes available on queue submission.
- MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapRequestTracker();
- tracker->Track(this, serial, data, true);
return {};
}
void Buffer::UnmapImpl() {
- GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
- // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
- // them when they are unmapped.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- ToBackend(GetDevice())->GetResidencyManager()->UnlockHeap(heap);
+ UnmapBufferInternal(mWrittenMappedRange);
+
mWrittenMappedRange = {};
+ mMappedData = nullptr;
+ }
+
+ void* Buffer::GetMappedPointerImpl() {
+ return mMappedData;
}
void Buffer::DestroyImpl() {
@@ -310,7 +311,7 @@ namespace dawn_native { namespace d3d12 {
// reference on its heap.
if (IsMapped()) {
Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- ToBackend(GetDevice())->GetResidencyManager()->UnlockHeap(heap);
+ ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
}
ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
@@ -325,29 +326,39 @@ namespace dawn_native { namespace d3d12 {
return mResourceAllocation.GetInfo().mMethod == allocationMethod;
}
- MapRequestTracker::MapRequestTracker(Device* device) : mDevice(device) {
- }
+ MaybeError Buffer::ClearBuffer(ClearValue clearValue) {
+ // TODO(jiawei.shao@intel.com): support buffer lazy-initialization to 0.
+ ASSERT(clearValue == BufferBase::ClearValue::NonZero);
+ constexpr uint8_t kClearBufferValue = 1u;
- MapRequestTracker::~MapRequestTracker() {
- ASSERT(mInflightRequests.Empty());
- }
+ Device* device = ToBackend(GetDevice());
- void MapRequestTracker::Track(Buffer* buffer, uint32_t mapSerial, void* data, bool isWrite) {
- Request request;
- request.buffer = buffer;
- request.mapSerial = mapSerial;
- request.data = data;
- request.isWrite = isWrite;
+        // Buffers on the UPLOAD heap must always be in the GENERIC_READ state and cannot be
+        // transitioned out of it, so the only way to clear such buffers is through a mapping.
+ if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
+ uint8_t* mappedData = nullptr;
+ D3D12_RANGE writeRange = {0, static_cast<size_t>(GetSize())};
+ DAWN_TRY(MapBufferInternal(writeRange, reinterpret_cast<void**>(&mappedData),
+ "D3D12 map at clear buffer"));
- mInflightRequests.Enqueue(std::move(request), mDevice->GetPendingCommandSerial());
- }
+ memset(mappedData, kClearBufferValue, GetSize());
+
+ UnmapBufferInternal(writeRange);
+ mappedData = nullptr;
+ } else {
+ // TODO(jiawei.shao@intel.com): use ClearUnorderedAccessView*() when the buffer usage
+ // includes STORAGE.
+ DynamicUploader* uploader = device->GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(GetSize(), device->GetPendingCommandSerial()));
+
+ memset(uploadHandle.mappedBuffer, kClearBufferValue, GetSize());
- void MapRequestTracker::Tick(Serial finishedSerial) {
- for (auto& request : mInflightRequests.IterateUpTo(finishedSerial)) {
- request.buffer->OnMapCommandSerialFinished(request.mapSerial, request.data,
- request.isWrite);
+ DAWN_TRY(device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer,
+ uploadHandle.startOffset, this, 0, GetSize()));
}
- mInflightRequests.ClearUpTo(finishedSerial);
- }
+ return {};
+ }
}} // namespace dawn_native::d3d12
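ClearBuffer above picks between two strategies based purely on the heap type, because D3D12 requires UPLOAD-heap resources to stay in GENERIC_READ. A condensed sketch of that decision, with hypothetical helper names standing in only for the inline map/memset/unmap and staging-copy sequences shown above:

    // Hypothetical helpers, named here only to make the branch structure explicit.
    if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
        ClearThroughMapping(this, kClearBufferValue);      // Map + memset + Unmap
    } else {
        ClearThroughStagingCopy(this, kClearBufferValue);  // memset staging, then GPU copy
    }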
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
index 02ec4035dde..3c51b743946 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
@@ -34,7 +34,6 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12Resource> GetD3D12Resource() const;
D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
- void OnMapCommandSerialFinished(uint32_t mapSerial, void* data, bool isWrite);
bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
@@ -55,36 +54,24 @@ namespace dawn_native { namespace d3d12 {
bool IsMapWritable() const override;
virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ void* GetMappedPointerImpl() override;
+ MaybeError MapBufferInternal(D3D12_RANGE mappedRange,
+ void** mappedPointer,
+ const char* contextInfo);
+ void UnmapBufferInternal(D3D12_RANGE mappedRange);
bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
wgpu::BufferUsage newUsage);
+ MaybeError ClearBuffer(ClearValue clearValue);
+
ResourceHeapAllocation mResourceAllocation;
bool mFixedResourceState = false;
wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
Serial mLastUsedSerial = UINT64_MAX;
D3D12_RANGE mWrittenMappedRange;
- };
-
- class MapRequestTracker {
- public:
- MapRequestTracker(Device* device);
- ~MapRequestTracker();
-
- void Track(Buffer* buffer, uint32_t mapSerial, void* data, bool isWrite);
- void Tick(Serial finishedSerial);
-
- private:
- Device* mDevice;
-
- struct Request {
- Ref<Buffer> buffer;
- uint32_t mapSerial;
- void* data;
- bool isWrite;
- };
- SerialQueue<Request> mInflightRequests;
+ char* mMappedData = nullptr;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
index 0ec73aaa76d..a0df3b68730 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "dawn_native/BindGroupAndStorageBarrierTracker.h"
#include "dawn_native/CommandEncoder.h"
+#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/d3d12/BindGroupD3D12.h"
@@ -30,6 +31,7 @@
#include "dawn_native/d3d12/RenderPassBuilderD3D12.h"
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
+#include "dawn_native/d3d12/SamplerHeapCacheD3D12.h"
#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/TextureCopySplitter.h"
@@ -61,17 +63,6 @@ namespace dawn_native { namespace d3d12 {
const Extent3D& srcSize = src->GetSize();
const Extent3D& dstSize = dst->GetSize();
- auto GetCopyDepth = [](const Texture* texture) {
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- return 1u;
- case wgpu::TextureDimension::e2D:
- return texture->GetArrayLayers();
- case wgpu::TextureDimension::e3D:
- return texture->GetSize().depth;
- }
- };
-
// https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copyresource
// In order to use D3D12's copy resource, the textures must be the same dimensions, and
// the copy must be of the entire resource.
@@ -83,10 +74,10 @@ namespace dawn_native { namespace d3d12 {
// cannot use CopyResource.
copySize.width == dstSize.width && //
copySize.width == srcSize.width && //
- copySize.height == dstSize.height && //
- copySize.height == srcSize.height && //
- copySize.depth == GetCopyDepth(src) && //
- copySize.depth == GetCopyDepth(dst);
+ copySize.height == dstSize.height && //
+ copySize.height == srcSize.height && //
+ copySize.depth == dstSize.depth && //
+ copySize.depth == srcSize.depth;
}
} // anonymous namespace
@@ -95,6 +86,7 @@ namespace dawn_native { namespace d3d12 {
public:
BindGroupStateTracker(Device* device)
: BindGroupAndStorageBarrierTrackerBase(),
+ mDevice(device),
mViewAllocator(device->GetViewShaderVisibleDescriptorAllocator()),
mSamplerAllocator(device->GetSamplerShaderVisibleDescriptorAllocator()) {
}
@@ -114,10 +106,10 @@ namespace dawn_native { namespace d3d12 {
// TODO(bryan.bernhart@intel.com): Consider further optimization.
bool didCreateBindGroupViews = true;
bool didCreateBindGroupSamplers = true;
- for (uint32_t index : IterateBitSet(mDirtyBindGroups)) {
+ for (BindGroupIndex index : IterateBitSet(mDirtyBindGroups)) {
BindGroup* group = ToBackend(mBindGroups[index]);
didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
- didCreateBindGroupSamplers = group->PopulateSamplers(mSamplerAllocator);
+ didCreateBindGroupSamplers = group->PopulateSamplers(mDevice, mSamplerAllocator);
if (!didCreateBindGroupViews && !didCreateBindGroupSamplers) {
break;
}
@@ -140,24 +132,25 @@ namespace dawn_native { namespace d3d12 {
// Must be called before applying the bindgroups.
SetID3D12DescriptorHeaps(commandList);
- for (uint32_t index : IterateBitSet(mBindGroupLayoutsMask)) {
+ for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
BindGroup* group = ToBackend(mBindGroups[index]);
didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
- didCreateBindGroupSamplers = group->PopulateSamplers(mSamplerAllocator);
+ didCreateBindGroupSamplers =
+ group->PopulateSamplers(mDevice, mSamplerAllocator);
ASSERT(didCreateBindGroupViews);
ASSERT(didCreateBindGroupSamplers);
}
}
- for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
BindGroup* group = ToBackend(mBindGroups[index]);
ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
}
if (mInCompute) {
- for (uint32_t index : IterateBitSet(mBindGroupLayoutsMask)) {
- for (uint32_t binding : IterateBitSet(mBindingsNeedingBarrier[index])) {
+ for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
+ for (BindingIndex binding : IterateBitSet(mBindingsNeedingBarrier[index])) {
wgpu::BindingType bindingType = mBindingTypes[index][binding];
switch (bindingType) {
case wgpu::BindingType::StorageBuffer:
@@ -166,20 +159,24 @@ namespace dawn_native { namespace d3d12 {
wgpu::BufferUsage::Storage);
break;
- case wgpu::BindingType::ReadonlyStorageTexture:
- ToBackend(static_cast<TextureView*>(mBindings[index][binding])
- ->GetTexture())
+ case wgpu::BindingType::ReadonlyStorageTexture: {
+ TextureViewBase* view =
+ static_cast<TextureViewBase*>(mBindings[index][binding]);
+ ToBackend(view->GetTexture())
->TrackUsageAndTransitionNow(commandContext,
- kReadonlyStorageTexture);
+ kReadonlyStorageTexture,
+ view->GetSubresourceRange());
break;
-
- case wgpu::BindingType::WriteonlyStorageTexture:
- ToBackend(static_cast<TextureView*>(mBindings[index][binding])
- ->GetTexture())
+ }
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ TextureViewBase* view =
+ static_cast<TextureViewBase*>(mBindings[index][binding]);
+ ToBackend(view->GetTexture())
->TrackUsageAndTransitionNow(commandContext,
- wgpu::TextureUsage::Storage);
+ wgpu::TextureUsage::Storage,
+ view->GetSubresourceRange());
break;
-
+ }
case wgpu::BindingType::StorageTexture:
// Not implemented.
@@ -214,19 +211,29 @@ namespace dawn_native { namespace d3d12 {
private:
void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
const PipelineLayout* pipelineLayout,
- uint32_t index,
+ BindGroupIndex index,
BindGroup* group,
- uint32_t dynamicOffsetCount,
- const uint64_t* dynamicOffsets) {
- ASSERT(dynamicOffsetCount == group->GetLayout()->GetDynamicBufferCount());
+ uint32_t dynamicOffsetCountIn,
+ const uint64_t* dynamicOffsetsIn) {
+ ityp::span<BindingIndex, const uint64_t> dynamicOffsets(
+ dynamicOffsetsIn, BindingIndex(dynamicOffsetCountIn));
+ ASSERT(dynamicOffsets.size() == group->GetLayout()->GetDynamicBufferCount());
// Usually, the application won't set the same offsets many times,
// so always try to apply dynamic offsets even if the offsets stay the same
- if (dynamicOffsetCount != 0) {
+ if (dynamicOffsets.size() != BindingIndex(0)) {
// Update dynamic offsets.
// Dynamic buffer bindings are packed at the beginning of the layout.
- for (BindingIndex bindingIndex = 0; bindingIndex < dynamicOffsetCount;
+ for (BindingIndex bindingIndex{0}; bindingIndex < dynamicOffsets.size();
++bindingIndex) {
+ const BindingInfo& bindingInfo =
+ group->GetLayout()->GetBindingInfo(bindingIndex);
+ if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+ // Skip dynamic buffers that are not visible. D3D12 does not have None
+ // visibility.
+ continue;
+ }
+
uint32_t parameterIndex =
pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
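The ityp::span used in ApplyBindGroup above wraps the raw dynamic-offset array in a span indexed by a strong index type. A short sketch of the pattern, with the interface assumed from common/ityp_span.h and rawOffsets/offsetCount as placeholder inputs:

    ityp::span<BindingIndex, const uint64_t> offsets(rawOffsets, BindingIndex(offsetCount));
    for (BindingIndex i{0}; i < offsets.size(); ++i) {
        const uint64_t offset = offsets[i];  // operator[] takes a BindingIndex, not a uint32_t
        // ... use the offset ...
    }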
@@ -238,7 +245,7 @@ namespace dawn_native { namespace d3d12 {
D3D12_GPU_VIRTUAL_ADDRESS bufferLocation =
ToBackend(binding.buffer)->GetVA() + offset;
- switch (group->GetLayout()->GetBindingInfo(bindingIndex).type) {
+ switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
if (mInCompute) {
commandList->SetComputeRootConstantBufferView(parameterIndex,
@@ -310,6 +317,8 @@ namespace dawn_native { namespace d3d12 {
}
}
+ Device* mDevice;
+
bool mInCompute = false;
ShaderVisibleDescriptorAllocator* mViewAllocator;
@@ -421,15 +430,17 @@ namespace dawn_native { namespace d3d12 {
continue;
}
- Texture* colorTexture =
- ToBackend(renderPass->colorAttachments[i].view->GetTexture());
+ TextureViewBase* colorView = renderPass->colorAttachments[i].view.Get();
+ Texture* colorTexture = ToBackend(colorView->GetTexture());
Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
// Transition the usages of the color attachment and resolve target.
colorTexture->TrackUsageAndTransitionNow(commandContext,
- D3D12_RESOURCE_STATE_RESOLVE_SOURCE);
+ D3D12_RESOURCE_STATE_RESOLVE_SOURCE,
+ colorView->GetSubresourceRange());
resolveTexture->TrackUsageAndTransitionNow(commandContext,
- D3D12_RESOURCE_STATE_RESOLVE_DEST);
+ D3D12_RESOURCE_STATE_RESOLVE_DEST,
+ resolveTarget->GetSubresourceRange());
// Do MSAA resolve with ResolveSubResource().
ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
@@ -488,21 +499,17 @@ namespace dawn_native { namespace d3d12 {
// cleared during record render pass if the texture subresource has not been
// initialized before the render pass.
if (!(usages.textureUsages[i].usage & wgpu::TextureUsage::OutputAttachment)) {
- texture->EnsureSubresourceContentInitialized(commandContext, 0,
- texture->GetNumMipLevels(), 0,
- texture->GetArrayLayers());
+ texture->EnsureSubresourceContentInitialized(commandContext,
+ texture->GetAllSubresources());
}
}
wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
for (size_t i = 0; i < usages.textures.size(); ++i) {
- D3D12_RESOURCE_BARRIER barrier;
- if (ToBackend(usages.textures[i])
- ->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
- usages.textureUsages[i].usage)) {
- barriers.push_back(barrier);
- }
+ ToBackend(usages.textures[i])
+ ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
+ usages.textureUsages[i]);
textureUsages |= usages.textureUsages[i].usage;
}
@@ -569,41 +576,60 @@ namespace dawn_native { namespace d3d12 {
Buffer* buffer = ToBackend(copy->source.buffer.Get());
Texture* texture = ToBackend(copy->destination.texture.Get());
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+                    // TODO(jiawei.shao@intel.com): use copy->destination.origin.z instead of
+                    // copy->destination.arrayLayer once the migration from
+                    // GPUTextureCopyView.arrayLayer to GPUTextureCopyView.origin.z is done.
+ SubresourceRange subresources = {copy->destination.mipLevel, 1,
+ copy->destination.arrayLayer,
+ copy->copySize.depth};
if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
copy->destination.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(
- true, copy->destination.mipLevel, 1, copy->destination.arrayLayer, 1);
+ texture->SetIsSubresourceContentInitialized(true, subresources);
} else {
- texture->EnsureSubresourceContentInitialized(
- commandContext, copy->destination.mipLevel, 1,
- copy->destination.arrayLayer, 1);
+ texture->EnsureSubresourceContentInitialized(commandContext, subresources);
}
buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
- texture->TrackUsageAndTransitionNow(commandContext,
- wgpu::TextureUsage::CopyDst);
-
- auto copySplit = ComputeTextureCopySplit(
- copy->destination.origin, copy->copySize, texture->GetFormat(),
- copy->source.offset, copy->source.bytesPerRow, copy->source.rowsPerImage);
-
- D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(texture, copy->destination.mipLevel,
- copy->destination.arrayLayer);
-
- for (uint32_t i = 0; i < copySplit.count; ++i) {
- TextureCopySplit::CopyInfo& info = copySplit.copies[i];
-
- D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(
- texture, buffer->GetD3D12Resource().Get(), info.bufferSize,
- copySplit.offset, copy->source.bytesPerRow);
- D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
-
- commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
- info.textureOffset.y, info.textureOffset.z,
- &bufferLocation, &sourceRegion);
+ texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
+ subresources);
+
+                const uint64_t bytesPerSlice =
+                    static_cast<uint64_t>(copy->source.bytesPerRow) * copy->source.rowsPerImage;
+
+ const dawn_native::Extent3D copyOneLayerSize = {copy->copySize.width,
+ copy->copySize.height, 1};
+ uint64_t bufferOffsetForNextSlice = 0;
+ for (uint32_t copySlice = copy->destination.arrayLayer;
+ copySlice < copy->destination.arrayLayer + copy->copySize.depth;
+ ++copySlice) {
+ // TODO(jiawei.shao@intel.com): compute copySplit once for all texture array
+ // layers when possible.
+ auto copySplit = ComputeTextureCopySplit(
+ copy->destination.origin, copyOneLayerSize, texture->GetFormat(),
+ bufferOffsetForNextSlice + copy->source.offset,
+ copy->source.bytesPerRow, copy->source.rowsPerImage);
+
+ D3D12_TEXTURE_COPY_LOCATION textureLocation =
+ ComputeTextureCopyLocationForTexture(
+ texture, copy->destination.mipLevel, copySlice);
+
+ for (uint32_t i = 0; i < copySplit.count; ++i) {
+ const TextureCopySplit::CopyInfo& info = copySplit.copies[i];
+
+ D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+ ComputeBufferLocationForCopyTextureRegion(
+ texture, buffer->GetD3D12Resource().Get(), info.bufferSize,
+ copySplit.offset, copy->source.bytesPerRow);
+ D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
+
+ commandList->CopyTextureRegion(
+ &textureLocation, info.textureOffset.x, info.textureOffset.y,
+ info.textureOffset.z, &bufferLocation, &sourceRegion);
+ }
+
+ bufferOffsetForNextSlice += bytesPerSlice;
}
break;
}
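A worked example of the per-slice addressing in the loop above, with hypothetical sizes for a 256x256 RGBA8 copy:

    const uint64_t bytesPerRow = 256 * 4;                       // 1024 bytes per row
    const uint64_t rowsPerImage = 256;
    const uint64_t bytesPerSlice = bytesPerRow * rowsPerImage;  // 262144 bytes
    // Array layer N of the copy reads from copy->source.offset + N * 262144,
    // which is the bufferOffsetForNextSlice accumulation performed each iteration.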
@@ -613,36 +639,54 @@ namespace dawn_native { namespace d3d12 {
Texture* texture = ToBackend(copy->source.texture.Get());
Buffer* buffer = ToBackend(copy->destination.buffer.Get());
- texture->EnsureSubresourceContentInitialized(
- commandContext, copy->source.mipLevel, 1, copy->source.arrayLayer, 1);
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+                    // TODO(jiawei.shao@intel.com): use copy->source.origin.z instead of
+                    // copy->source.arrayLayer once the migration from
+                    // GPUTextureCopyView.arrayLayer to GPUTextureCopyView.origin.z is done.
+ SubresourceRange subresources = {copy->source.mipLevel, 1,
+ copy->source.arrayLayer, copy->copySize.depth};
+ texture->EnsureSubresourceContentInitialized(commandContext, subresources);
- texture->TrackUsageAndTransitionNow(commandContext,
- wgpu::TextureUsage::CopySrc);
+ texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
+ subresources);
buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
- TextureCopySplit copySplit = ComputeTextureCopySplit(
- copy->source.origin, copy->copySize, texture->GetFormat(),
- copy->destination.offset, copy->destination.bytesPerRow,
- copy->destination.rowsPerImage);
-
- D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(texture, copy->source.mipLevel,
- copy->source.arrayLayer);
-
- for (uint32_t i = 0; i < copySplit.count; ++i) {
- TextureCopySplit::CopyInfo& info = copySplit.copies[i];
-
- D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(
- texture, buffer->GetD3D12Resource().Get(), info.bufferSize,
- copySplit.offset, copy->destination.bytesPerRow);
-
- D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
+                    const uint64_t bytesPerSlice =
+                        static_cast<uint64_t>(copy->destination.bytesPerRow) *
+                        copy->destination.rowsPerImage;
+
+ const dawn_native::Extent3D copyOneLayerSize = {copy->copySize.width,
+ copy->copySize.height, 1};
+ uint64_t bufferOffsetForNextSlice = 0;
+ for (uint32_t copySlice = copy->source.arrayLayer;
+ copySlice < copy->source.arrayLayer + copy->copySize.depth; ++copySlice) {
+ // TODO(jiawei.shao@intel.com): compute copySplit once for all texture array
+ // layers when possible.
+ TextureCopySplit copySplit = ComputeTextureCopySplit(
+ copy->source.origin, copyOneLayerSize, texture->GetFormat(),
+ bufferOffsetForNextSlice + copy->destination.offset,
+ copy->destination.bytesPerRow, copy->destination.rowsPerImage);
+
+ D3D12_TEXTURE_COPY_LOCATION textureLocation =
+ ComputeTextureCopyLocationForTexture(texture, copy->source.mipLevel,
+ copySlice);
+
+ for (uint32_t i = 0; i < copySplit.count; ++i) {
+ const TextureCopySplit::CopyInfo& info = copySplit.copies[i];
+
+ D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+ ComputeBufferLocationForCopyTextureRegion(
+ texture, buffer->GetD3D12Resource().Get(), info.bufferSize,
+ copySplit.offset, copy->destination.bytesPerRow);
+
+ D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
+
+ commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
+ info.bufferOffset.y, info.bufferOffset.z,
+ &textureLocation, &sourceRegion);
+ }
- commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
- info.bufferOffset.y, info.bufferOffset.z,
- &textureLocation, &sourceRegion);
+ bufferOffsetForNextSlice += bytesPerSlice;
}
break;
}
@@ -653,41 +697,62 @@ namespace dawn_native { namespace d3d12 {
Texture* source = ToBackend(copy->source.texture.Get());
Texture* destination = ToBackend(copy->destination.texture.Get());
+ SubresourceRange srcRange = {copy->source.mipLevel, 1, copy->source.arrayLayer,
+ copy->copySize.depth};
+ SubresourceRange dstRange = {copy->destination.mipLevel, 1,
+ copy->destination.arrayLayer,
+ copy->copySize.depth};
- source->EnsureSubresourceContentInitialized(
- commandContext, copy->source.mipLevel, 1, copy->source.arrayLayer, 1);
+ source->EnsureSubresourceContentInitialized(commandContext, srcRange);
if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
copy->destination.mipLevel)) {
- destination->SetIsSubresourceContentInitialized(
- true, copy->destination.mipLevel, 1, copy->destination.arrayLayer, 1);
+ destination->SetIsSubresourceContentInitialized(true, dstRange);
} else {
- destination->EnsureSubresourceContentInitialized(
- commandContext, copy->destination.mipLevel, 1,
- copy->destination.arrayLayer, 1);
+ destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
}
- source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc);
+
+ if (copy->source.texture.Get() == copy->destination.texture.Get() &&
+ copy->source.mipLevel == copy->destination.mipLevel) {
+                        // If subresources overlapped, the layout of all overlapped
+                        // subresources would have to be COMMON instead of what we set here.
+                        // Copies between overlapping subresources are currently disallowed,
+                        // but we keep this ASSERT as a guard against that possible misuse.
+ ASSERT(!IsRangeOverlapped(copy->source.arrayLayer,
+ copy->destination.arrayLayer,
+ copy->copySize.depth));
+ }
+ source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
+ srcRange);
destination->TrackUsageAndTransitionNow(commandContext,
- wgpu::TextureUsage::CopyDst);
+ wgpu::TextureUsage::CopyDst, dstRange);
if (CanUseCopyResource(source, destination, copy->copySize)) {
commandList->CopyResource(destination->GetD3D12Resource(),
source->GetD3D12Resource());
} else {
- D3D12_TEXTURE_COPY_LOCATION srcLocation =
- ComputeTextureCopyLocationForTexture(source, copy->source.mipLevel,
- copy->source.arrayLayer);
-
- D3D12_TEXTURE_COPY_LOCATION dstLocation =
- ComputeTextureCopyLocationForTexture(destination,
- copy->destination.mipLevel,
- copy->destination.arrayLayer);
-
- D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(copy->source.origin, copy->copySize);
-
- commandList->CopyTextureRegion(
- &dstLocation, copy->destination.origin.x, copy->destination.origin.y,
- copy->destination.origin.z, &srcLocation, &sourceRegion);
+ // TODO(jiawei.shao@intel.com): support copying with 1D and 3D textures.
+ ASSERT(source->GetDimension() == wgpu::TextureDimension::e2D &&
+ destination->GetDimension() == wgpu::TextureDimension::e2D);
+ const dawn_native::Extent3D copyExtentOneSlice = {
+ copy->copySize.width, copy->copySize.height, 1u};
+ for (uint32_t slice = 0; slice < copy->copySize.depth; ++slice) {
+ D3D12_TEXTURE_COPY_LOCATION srcLocation =
+ ComputeTextureCopyLocationForTexture(
+ source, copy->source.mipLevel, copy->source.arrayLayer + slice);
+
+ D3D12_TEXTURE_COPY_LOCATION dstLocation =
+ ComputeTextureCopyLocationForTexture(
+ destination, copy->destination.mipLevel,
+ copy->destination.arrayLayer + slice);
+
+ D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
+ copy->source.origin, copyExtentOneSlice);
+
+ commandList->CopyTextureRegion(&dstLocation, copy->destination.origin.x,
+ copy->destination.origin.y,
+ copy->destination.origin.z, &srcLocation,
+ &sourceRegion);
+ }
}
break;
}
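The IsRangeOverlapped check asserted in the texture-to-texture path above comes from dawn_native/CommandValidation.h (newly included at the top of this file). Its assumed semantics: two ranges of equal length starting at startA and startB overlap iff the earlier one reaches the start of the later one. A self-contained sketch, not necessarily the exact implementation:

    #include <algorithm>
    #include <cstdint>

    bool IsRangeOverlappedSketch(uint32_t startA, uint32_t startB, uint32_t length) {
        const uint32_t minStart = std::min(startA, startB);
        const uint32_t maxStart = std::max(startA, startB);
        // Widen before adding so that minStart + length cannot wrap around.
        return static_cast<uint64_t>(minStart) + length > maxStart;
    }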
@@ -847,7 +912,8 @@ namespace dawn_native { namespace d3d12 {
ToBackend(resolveDestinationView->GetTexture());
resolveDestinationTexture->TrackUsageAndTransitionNow(
- commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST);
+ commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST,
+ resolveDestinationView->GetSubresourceRange());
renderPassBuilder->SetRenderTargetEndingAccessResolve(i, attachmentInfo.storeOp,
view, resolveDestinationView);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
index 81dad3c138e..d652d878e6b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
@@ -61,7 +61,7 @@ namespace dawn_native { namespace d3d12 {
// common state right before command list submission. TransitionUsageNow itself ensures
        no unnecessary transitions happen if the resource is already in the common state.
for (Texture* texture : mSharedTextures) {
- texture->TrackUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
+ texture->TrackAllUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
}
MaybeError error =
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
index 940a3e9d3fa..0c9fc6f6b00 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
@@ -19,6 +19,7 @@
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
#include "dawn_native/d3d12/ShaderModuleD3D12.h"
+#include "dawn_native/d3d12/UtilsD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -44,21 +45,28 @@ namespace dawn_native { namespace d3d12 {
std::string hlslSource;
DAWN_TRY_ASSIGN(hlslSource, module->GetHLSLSource(ToBackend(GetLayout())));
- ComPtr<ID3DBlob> compiledShader;
- ComPtr<ID3DBlob> errors;
-
- const PlatformFunctions* functions = device->GetFunctions();
- if (FAILED(functions->d3dCompile(hlslSource.c_str(), hlslSource.length(), nullptr, nullptr,
- nullptr, descriptor->computeStage.entryPoint, "cs_5_1",
- compileFlags, 0, &compiledShader, &errors))) {
- printf("%s\n", reinterpret_cast<char*>(errors->GetBufferPointer()));
- ASSERT(false);
- }
-
D3D12_COMPUTE_PIPELINE_STATE_DESC d3dDesc = {};
d3dDesc.pRootSignature = ToBackend(GetLayout())->GetRootSignature();
- d3dDesc.CS.pShaderBytecode = compiledShader->GetBufferPointer();
- d3dDesc.CS.BytecodeLength = compiledShader->GetBufferSize();
+
+ ComPtr<IDxcBlob> compiledDXCShader;
+ ComPtr<ID3DBlob> compiledFXCShader;
+
+ if (device->IsToggleEnabled(Toggle::UseDXC)) {
+ DAWN_TRY_ASSIGN(
+ compiledDXCShader,
+ module->CompileShaderDXC(SingleShaderStage::Compute, hlslSource,
+ descriptor->computeStage.entryPoint, compileFlags));
+
+ d3dDesc.CS.pShaderBytecode = compiledDXCShader->GetBufferPointer();
+ d3dDesc.CS.BytecodeLength = compiledDXCShader->GetBufferSize();
+ } else {
+ DAWN_TRY_ASSIGN(
+ compiledFXCShader,
+ module->CompileShaderFXC(SingleShaderStage::Compute, hlslSource,
+ descriptor->computeStage.entryPoint, compileFlags));
+ d3dDesc.CS.pShaderBytecode = compiledFXCShader->GetBufferPointer();
+ d3dDesc.CS.BytecodeLength = compiledFXCShader->GetBufferSize();
+ }
device->GetD3D12Device()->CreateComputePipelineState(&d3dDesc,
IID_PPV_ARGS(&mPipelineState));
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
index d91d479c735..efc9fe09bad 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
@@ -14,28 +14,38 @@
#include "dawn_native/d3d12/D3D12Error.h"
+#include <iomanip>
+#include <sstream>
#include <string>
namespace dawn_native { namespace d3d12 {
- MaybeError CheckHRESULT(HRESULT result, const char* context) {
+ MaybeError CheckHRESULTImpl(HRESULT result, const char* context) {
if (DAWN_LIKELY(SUCCEEDED(result))) {
return {};
}
- std::string message = std::string(context) + " failed with " + std::to_string(result);
+ std::ostringstream messageStream;
+ messageStream << context << " failed with ";
+ if (result == E_FAKE_ERROR_FOR_TESTING) {
+ messageStream << "E_FAKE_ERROR_FOR_TESTING";
+ } else {
+ messageStream << "0x" << std::uppercase << std::setfill('0') << std::setw(8) << std::hex
+ << result;
+ }
if (result == DXGI_ERROR_DEVICE_REMOVED) {
- return DAWN_DEVICE_LOST_ERROR(message);
+ return DAWN_DEVICE_LOST_ERROR(messageStream.str());
} else {
- return DAWN_INTERNAL_ERROR(message);
+ return DAWN_INTERNAL_ERROR(messageStream.str());
}
}
- MaybeError CheckOutOfMemoryHRESULT(HRESULT result, const char* context) {
- if (result == E_OUTOFMEMORY) {
+ MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context) {
+ if (result == E_OUTOFMEMORY || result == E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING) {
return DAWN_OUT_OF_MEMORY_ERROR(context);
}
- return CheckHRESULT(result, context);
+
+ return CheckHRESULTImpl(result, context);
}
}} // namespace dawn_native::d3d12
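As a standalone illustration of the formatting above (hypothetical program, not part of the tree): DXGI_ERROR_DEVICE_REMOVED has the value 0x887A0005, and the manipulators render it as a fixed-width uppercase hex code rather than the decimal string std::to_string produced before.

    #include <iomanip>
    #include <iostream>
    #include <sstream>

    int main() {
        const long hr = static_cast<long>(0x887A0005);  // DXGI_ERROR_DEVICE_REMOVED
        std::ostringstream s;
        s << "0x" << std::uppercase << std::setfill('0') << std::setw(8) << std::hex << hr;
        std::cout << s.str() << '\n';  // prints 0x887A0005
        return 0;
    }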
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
index ed11a8c7c5d..16ea4c4e834 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
@@ -17,14 +17,28 @@
#include <d3d12.h>
#include "dawn_native/Error.h"
+#include "dawn_native/ErrorInjector.h"
namespace dawn_native { namespace d3d12 {
+ constexpr HRESULT E_FAKE_ERROR_FOR_TESTING = MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFF);
+ constexpr HRESULT E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING =
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFE);
+
    // Returns success only if the HRESULT indicates success.
- MaybeError CheckHRESULT(HRESULT result, const char* context);
+ MaybeError CheckHRESULTImpl(HRESULT result, const char* context);
    // Uses CheckHRESULT but returns an OOM-specific error when recoverable.
- MaybeError CheckOutOfMemoryHRESULT(HRESULT result, const char* context);
+ MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context);
+
+#define CheckHRESULT(resultIn, contextIn) \
+ ::dawn_native::d3d12::CheckHRESULTImpl( \
+ INJECT_ERROR_OR_RUN(resultIn, E_FAKE_ERROR_FOR_TESTING), contextIn)
+#define CheckOutOfMemoryHRESULT(resultIn, contextIn) \
+ ::dawn_native::d3d12::CheckOutOfMemoryHRESULTImpl( \
+ INJECT_ERROR_OR_RUN(resultIn, E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING, \
+ E_FAKE_ERROR_FOR_TESTING), \
+ contextIn)
}} // namespace dawn_native::d3d12
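With these macros, call sites keep the original CheckHRESULT spelling while tests can substitute the fake HRESULTs through INJECT_ERROR_OR_RUN. A hypothetical call site (CreateFenceExample is not in the tree) sketching the usage:

    MaybeError CreateFenceExample(ID3D12Device* device, ComPtr<ID3D12Fence>& fence) {
        DAWN_TRY(CheckHRESULT(
            device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&fence)),
            "D3D12 create fence"));
        return {};
    }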
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
index cfd109f691e..9f180450c5c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "dawn_native/BackendConnection.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/Instance.h"
#include "dawn_native/d3d12/AdapterD3D12.h"
#include "dawn_native/d3d12/BackendD3D12.h"
#include "dawn_native/d3d12/BindGroupD3D12.h"
@@ -33,6 +34,7 @@
#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
+#include "dawn_native/d3d12/SamplerHeapCacheD3D12.h"
#include "dawn_native/d3d12/ShaderModuleD3D12.h"
#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/StagingBufferD3D12.h"
@@ -40,12 +42,16 @@
#include "dawn_native/d3d12/SwapChainD3D12.h"
#include "dawn_native/d3d12/TextureD3D12.h"
+#include <sstream>
+
namespace dawn_native { namespace d3d12 {
// TODO(dawn:155): Figure out these values.
static constexpr uint16_t kShaderVisibleDescriptorHeapSize = 1024;
static constexpr uint8_t kAttachmentDescriptorHeapSize = 64;
+ static constexpr uint64_t kMaxDebugMessagesToPrint = 5;
+
// static
ResultOrError<Device*> Device::Create(Adapter* adapter, const DeviceDescriptor* descriptor) {
Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
@@ -83,14 +89,6 @@ namespace dawn_native { namespace d3d12 {
// Initialize backend services
mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
- DAWN_TRY_ASSIGN(
- mViewShaderVisibleDescriptorAllocator,
- ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
-
- DAWN_TRY_ASSIGN(
- mSamplerShaderVisibleDescriptorAllocator,
- ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
-
        // A zero-sized allocator is never requested and does not need to exist.
for (uint32_t countIndex = 1; countIndex < kNumOfStagingDescriptorAllocators;
countIndex++) {
@@ -109,11 +107,19 @@ namespace dawn_native { namespace d3d12 {
mDepthStencilViewAllocator = std::make_unique<StagingDescriptorAllocator>(
this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_DSV);
- mMapRequestTracker = std::make_unique<MapRequestTracker>(this);
+ mSamplerHeapCache = std::make_unique<SamplerHeapCache>(this);
+
mResidencyManager = std::make_unique<ResidencyManager>(this);
mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
- DAWN_TRY(NextSerial());
+        // ShaderVisibleDescriptorAllocators use the ResidencyManager, so they must be
+        // initialized after it.
+ DAWN_TRY_ASSIGN(
+ mSamplerShaderVisibleDescriptorAllocator,
+ ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
+
+ DAWN_TRY_ASSIGN(
+ mViewShaderVisibleDescriptorAllocator,
+ ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
// Initialize indirect commands
D3D12_INDIRECT_ARGUMENT_DESC argumentDesc = {};
@@ -139,7 +145,11 @@ namespace dawn_native { namespace d3d12 {
GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
IID_PPV_ARGS(&mDrawIndexedIndirectSignature));
- return DeviceBase::Initialize(new Queue(this));
+ DAWN_TRY(DeviceBase::Initialize(new Queue(this)));
+        // The device shouldn't be used until after DeviceBase::Initialize, so we must wait
+        // until after device initialization to call NextSerial().
+ DAWN_TRY(NextSerial());
+ return {};
}
Device::~Device() {
@@ -174,12 +184,16 @@ namespace dawn_native { namespace d3d12 {
return ToBackend(GetAdapter())->GetBackend()->GetFactory();
}
- const PlatformFunctions* Device::GetFunctions() const {
- return ToBackend(GetAdapter())->GetBackend()->GetFunctions();
+ ResultOrError<IDxcLibrary*> Device::GetOrCreateDxcLibrary() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetOrCreateDxcLibrary();
}
- MapRequestTracker* Device::GetMapRequestTracker() const {
- return mMapRequestTracker.get();
+ ResultOrError<IDxcCompiler*> Device::GetOrCreateDxcCompiler() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetOrCreateDxcCompiler();
+ }
+
+ const PlatformFunctions* Device::GetFunctions() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetFunctions();
}
CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
@@ -200,8 +214,6 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError Device::TickImpl() {
- CheckPassedSerials();
-
// Perform cleanup operations to free unused objects
Serial completedSerial = GetCompletedCommandSerial();
@@ -211,10 +223,12 @@ namespace dawn_native { namespace d3d12 {
mSamplerShaderVisibleDescriptorAllocator->Tick(completedSerial);
mRenderTargetViewAllocator->Tick(completedSerial);
mDepthStencilViewAllocator->Tick(completedSerial);
- mMapRequestTracker->Tick(completedSerial);
mUsedComObjectRefs.ClearUpTo(completedSerial);
DAWN_TRY(ExecutePendingCommandContext());
DAWN_TRY(NextSerial());
+
+ DAWN_TRY(CheckDebugLayerAndGenerateErrors());
+
return {};
}
@@ -231,6 +245,7 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(serial, mFenceEvent),
"D3D12 set event on completion"));
WaitForSingleObject(mFenceEvent, INFINITE);
+ CheckPassedSerials();
}
return {};
}
@@ -272,6 +287,9 @@ namespace dawn_native { namespace d3d12 {
const PipelineLayoutDescriptor* descriptor) {
return PipelineLayout::Create(this, descriptor);
}
+ ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation");
+ }
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
return RenderPipeline::Create(this, descriptor);
@@ -446,29 +464,66 @@ namespace dawn_native { namespace d3d12 {
// Wait for all in-flight commands to finish executing
DAWN_TRY(WaitForSerial(GetLastSubmittedCommandSerial()));
- // Call tick one last time so resources are cleaned up.
- DAWN_TRY(TickImpl());
-
- // Force all operations to look as if they were completed
- AssumeCommandsComplete();
return {};
}
+ MaybeError Device::CheckDebugLayerAndGenerateErrors() {
+ if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
+ return {};
+ }
+
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ ASSERT_SUCCESS(mD3d12Device.As(&infoQueue));
+ uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
+
+        // Check whether any errors have occurred; otherwise we would create an empty error.
+        // Note that we use GetNumStoredMessagesAllowedByRetrievalFilter instead of
+        // GetNumStoredMessages because only messages of severity WARNING or higher are
+        // converted to Dawn errors.
+ if (totalErrors == 0) {
+ return {};
+ }
+
+ std::ostringstream messages;
+ uint64_t errorsToPrint = std::min(kMaxDebugMessagesToPrint, totalErrors);
+ for (uint64_t i = 0; i < errorsToPrint; ++i) {
+ SIZE_T messageLength = 0;
+ HRESULT hr = infoQueue->GetMessage(i, nullptr, &messageLength);
+ if (FAILED(hr)) {
+ messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
+ continue;
+ }
+
+ std::unique_ptr<uint8_t[]> messageData(new uint8_t[messageLength]);
+ D3D12_MESSAGE* message = reinterpret_cast<D3D12_MESSAGE*>(messageData.get());
+ hr = infoQueue->GetMessage(i, message, &messageLength);
+ if (FAILED(hr)) {
+ messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
+ continue;
+ }
+
+ messages << message->pDescription << " (" << message->ID << ")\n";
+ }
+ if (errorsToPrint < totalErrors) {
+ messages << (totalErrors - errorsToPrint) << " messages silenced\n";
+ }
+        // Only the first kMaxDebugMessagesToPrint errors were reported above; clear all
+        // stored messages so they are not reported again on the next check.
+ infoQueue->ClearStoredMessages();
+
+ return DAWN_INTERNAL_ERROR(messages.str());
+ }
+
void Device::ShutDownImpl() {
ASSERT(GetState() == State::Disconnected);
- // Immediately forget about all pending commands
+        // Immediately forget about all pending commands for the case where the device is
+        // lost on its own and WaitForIdleForDestruction isn't called.
mPendingCommands.Release();
- // Some operations might have been started since the last submit and waiting
- // on a serial that doesn't have a corresponding fence enqueued. Force all
- // operations to look as if they were completed (because they were).
- AssumeCommandsComplete();
-
if (mFenceEvent != nullptr) {
::CloseHandle(mFenceEvent);
}
+        // We need to clear up COM object refs that were enqueued after the last TickImpl.
mUsedComObjectRefs.ClearUpTo(GetCompletedCommandSerial());
ASSERT(mUsedComObjectRefs.Empty());
@@ -503,4 +558,8 @@ namespace dawn_native { namespace d3d12 {
return mDepthStencilViewAllocator.get();
}
+ SamplerHeapCache* Device::GetSamplerHeapCache() {
+ return mSamplerHeapCache.get();
+ }
+
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
index 450523024aa..1ee40927dfb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
@@ -31,10 +31,10 @@ namespace dawn_native { namespace d3d12 {
class CommandAllocatorManager;
class DescriptorHeapAllocator;
- class MapRequestTracker;
class PlatformFunctions;
class ResidencyManager;
class ResourceAllocatorManager;
+ class SamplerHeapCache;
class ShaderVisibleDescriptorAllocator;
class StagingDescriptorAllocator;
@@ -65,12 +65,13 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12CommandSignature> GetDrawIndirectSignature() const;
ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectSignature() const;
- MapRequestTracker* GetMapRequestTracker() const;
CommandAllocatorManager* GetCommandAllocatorManager() const;
ResidencyManager* GetResidencyManager() const;
const PlatformFunctions* GetFunctions() const;
ComPtr<IDXGIFactory4> GetFactory() const;
+ ResultOrError<IDxcLibrary*> GetOrCreateDxcLibrary() const;
+ ResultOrError<IDxcCompiler*> GetOrCreateDxcCompiler() const;
ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
@@ -107,6 +108,8 @@ namespace dawn_native { namespace d3d12 {
StagingDescriptorAllocator* GetSamplerStagingDescriptorAllocator(
uint32_t descriptorCount) const;
+ SamplerHeapCache* GetSamplerHeapCache();
+
StagingDescriptorAllocator* GetRenderTargetViewAllocator() const;
StagingDescriptorAllocator* GetDepthStencilViewAllocator() const;
@@ -133,6 +136,8 @@ namespace dawn_native { namespace d3d12 {
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
@@ -153,6 +158,8 @@ namespace dawn_native { namespace d3d12 {
void ShutDownImpl() override;
MaybeError WaitForIdleForDestruction() override;
+ MaybeError CheckDebugLayerAndGenerateErrors();
+
ComPtr<ID3D12Fence> mFence;
HANDLE mFenceEvent = nullptr;
Serial CheckAndUpdateCompletedSerials() override;
@@ -174,7 +181,6 @@ namespace dawn_native { namespace d3d12 {
SerialQueue<ComPtr<IUnknown>> mUsedComObjectRefs;
std::unique_ptr<CommandAllocatorManager> mCommandAllocatorManager;
- std::unique_ptr<MapRequestTracker> mMapRequestTracker;
std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
std::unique_ptr<ResidencyManager> mResidencyManager;
@@ -194,6 +200,10 @@ namespace dawn_native { namespace d3d12 {
std::unique_ptr<ShaderVisibleDescriptorAllocator> mViewShaderVisibleDescriptorAllocator;
std::unique_ptr<ShaderVisibleDescriptorAllocator> mSamplerShaderVisibleDescriptorAllocator;
+
+        // The sampler heap cache needs to be destroyed before the CPU sampler allocator to
+        // ensure the final release of each cached heap happens while the allocator exists.
+ std::unique_ptr<SamplerHeapCache> mSamplerHeapCache;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h b/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
index 1143a4a5acb..4e5368b63a5 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
@@ -28,6 +28,7 @@ namespace dawn_native { namespace d3d12 {
class Device;
class Heap;
class PipelineLayout;
+ class QuerySet;
class Queue;
class RenderPipeline;
class Sampler;
@@ -46,6 +47,7 @@ namespace dawn_native { namespace d3d12 {
using ComputePipelineType = ComputePipeline;
using DeviceType = Device;
using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
using QueueType = Queue;
using RenderPipelineType = RenderPipeline;
using ResourceHeapType = Heap;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
index add21cfcce4..ade5d4a5c1c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
@@ -16,16 +16,8 @@
namespace dawn_native { namespace d3d12 {
Heap::Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
- : mD3d12Pageable(std::move(d3d12Pageable)), mMemorySegment(memorySegment), mSize(size) {
- }
-
- Heap::~Heap() {
- // When a heap is destroyed, it no longer resides in resident memory, so we must evict
- // it from the LRU cache. If this heap is not manually removed from the LRU-cache, the
- // ResidencyManager will attempt to use it after it has been deallocated.
- if (IsInResidencyLRUCache()) {
- RemoveFromList();
- }
+ : Pageable(std::move(d3d12Pageable), memorySegment, size) {
+ mD3d12Pageable.As(&mD3d12Heap);
}
// This function should only be used when mD3D12Pageable was initialized from a
@@ -33,58 +25,7 @@ namespace dawn_native { namespace d3d12 {
// ID3D12Pageable was initially created as an ID3D12Resource (i.e. DirectAllocation), then
// use GetD3D12Pageable().
ID3D12Heap* Heap::GetD3D12Heap() const {
- ComPtr<ID3D12Heap> heap;
- HRESULT result = mD3d12Pageable.As(&heap);
- ASSERT(SUCCEEDED(result));
- return heap.Get();
- }
-
- ID3D12Pageable* Heap::GetD3D12Pageable() const {
- return mD3d12Pageable.Get();
- }
-
- MemorySegment Heap::GetMemorySegment() const {
- return mMemorySegment;
- }
-
- Serial Heap::GetLastUsage() const {
- return mLastUsage;
- }
-
- void Heap::SetLastUsage(Serial serial) {
- mLastUsage = serial;
- }
-
- uint64_t Heap::GetLastSubmission() const {
- return mLastSubmission;
- }
-
- void Heap::SetLastSubmission(Serial serial) {
- mLastSubmission = serial;
- }
-
- uint64_t Heap::GetSize() const {
- return mSize;
- }
-
- bool Heap::IsInResidencyLRUCache() const {
- return IsInList();
- }
-
- void Heap::IncrementResidencyLock() {
- mResidencyLockRefCount++;
- }
-
- void Heap::DecrementResidencyLock() {
- mResidencyLockRefCount--;
- }
-
- bool Heap::IsResidencyLocked() const {
- if (mResidencyLockRefCount == 0) {
- return false;
- }
-
- return true;
+ return mD3d12Heap.Get();
}
}} // namespace dawn_native::d3d12 \ No newline at end of file
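A note on the constructor change above: ComPtr::As performs a QueryInterface, so caching the ID3D12Heap once replaces the per-call QueryInterface the old GetD3D12Heap() did. For direct allocations the pageable is an ID3D12Resource, the As call fails quietly, and mD3d12Heap stays null, which is why the comment restricts GetD3D12Heap() to heap-backed allocations. The underlying pattern, sketched with an assumed in-scope pageable:

    ComPtr<ID3D12Heap> heap;
    if (SUCCEEDED(pageable.As(&heap))) {   // QueryInterface: AddRefs on success
        // pageable really is an ID3D12Heap; heap.Get() is usable.
    } else {
        // e.g. a directly allocated ID3D12Resource; heap remains null.
    }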
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
index 71a4a0f2048..715ffcdaf10 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
@@ -15,67 +15,25 @@
#ifndef DAWNNATIVE_D3D12_HEAPD3D12_H_
#define DAWNNATIVE_D3D12_HEAPD3D12_H_
-#include "common/LinkedList.h"
-#include "common/Serial.h"
-#include "dawn_native/D3D12Backend.h"
#include "dawn_native/ResourceHeap.h"
+#include "dawn_native/d3d12/PageableD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
class Device;
- // This class is used to represent heap allocations, but also serves as a node within the
- // ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
- // allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
- // LRU cache when it is evicted from resident memory due to budget constraints, or when the heap
- // is destroyed.
- class Heap : public ResourceHeapBase, public LinkNode<Heap> {
+ // This class is used to represent ID3D12Heap allocations, as well as an implicit heap
+ // representing a directly allocated resource. It inherits from Pageable because each Heap must
+ // be represented in the ResidencyManager.
+ class Heap : public ResourceHeapBase, public Pageable {
public:
Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
- ~Heap();
ID3D12Heap* GetD3D12Heap() const;
- ID3D12Pageable* GetD3D12Pageable() const;
- MemorySegment GetMemorySegment() const;
-
- // We set mLastRecordingSerial to denote the serial this heap was last recorded to be used.
- // We must check this serial against the current serial when recording heap usages to ensure
- // we do not process residency for this heap multiple times.
- Serial GetLastUsage() const;
- void SetLastUsage(Serial serial);
-
- // The residency manager must know the last serial that any portion of the heap was
- // submitted to be used so that we can ensure this heap stays resident in memory at least
- // until that serial has completed.
- uint64_t GetLastSubmission() const;
- void SetLastSubmission(Serial serial);
-
- uint64_t GetSize() const;
-
- bool IsInResidencyLRUCache() const;
-
- // In some scenarios, such as async buffer mapping, we must lock residency to ensure the
- // heap cannot be evicted. Because multiple buffers may be mapped in a single heap, we must
- // track the number of resources currently locked.
- void IncrementResidencyLock();
- void DecrementResidencyLock();
- bool IsResidencyLocked() const;
private:
- ComPtr<ID3D12Pageable> mD3d12Pageable;
- MemorySegment mMemorySegment;
- // mLastUsage denotes the last time this heap was recorded for use.
- Serial mLastUsage = 0;
- // mLastSubmission denotes the last time this heap was submitted to the GPU. Note that
- // although this variable often contains the same value as mLastUsage, it can differ in some
- // situations. When some asynchronous APIs (like SetSubData) are called, mLastUsage is
- // updated upon the call, but the backend operation is deferred until the next submission
- // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
- // accurately identify when heaps are evictable.
- Serial mLastSubmission = 0;
- uint32_t mResidencyLockRefCount = 0;
- uint64_t mSize = 0;
+ ComPtr<ID3D12Heap> mD3d12Heap;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp
new file mode 100644
index 00000000000..58847808644
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp
@@ -0,0 +1,76 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/PageableD3D12.h"
+
+namespace dawn_native { namespace d3d12 {
+ Pageable::Pageable(ComPtr<ID3D12Pageable> d3d12Pageable,
+ MemorySegment memorySegment,
+ uint64_t size)
+ : mD3d12Pageable(std::move(d3d12Pageable)), mMemorySegment(memorySegment), mSize(size) {
+ }
+
+    // When a pageable is destroyed, it no longer resides in resident memory, so we must
+    // evict it from the LRU cache. If the pageable is not manually removed from the LRU
+    // cache, the ResidencyManager will attempt to use it after it has been deallocated.
+ Pageable::~Pageable() {
+ if (IsInResidencyLRUCache()) {
+ RemoveFromList();
+ }
+ }
+
+ ID3D12Pageable* Pageable::GetD3D12Pageable() const {
+ return mD3d12Pageable.Get();
+ }
+
+ Serial Pageable::GetLastUsage() const {
+ return mLastUsage;
+ }
+
+ void Pageable::SetLastUsage(Serial serial) {
+ mLastUsage = serial;
+ }
+
+ uint64_t Pageable::GetLastSubmission() const {
+ return mLastSubmission;
+ }
+
+ void Pageable::SetLastSubmission(Serial serial) {
+ mLastSubmission = serial;
+ }
+
+ MemorySegment Pageable::GetMemorySegment() const {
+ return mMemorySegment;
+ }
+
+ uint64_t Pageable::GetSize() const {
+ return mSize;
+ }
+
+ bool Pageable::IsInResidencyLRUCache() const {
+ return IsInList();
+ }
+
+ void Pageable::IncrementResidencyLock() {
+ mResidencyLockRefCount++;
+ }
+
+ void Pageable::DecrementResidencyLock() {
+ mResidencyLockRefCount--;
+ }
+
+ bool Pageable::IsResidencyLocked() const {
+ return mResidencyLockRefCount != 0;
+ }
+}} // namespace dawn_native::d3d12 \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.h
new file mode 100644
index 00000000000..6b07adb4d44
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.h
@@ -0,0 +1,80 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_PAGEABLED3D12_H_
+#define DAWNNATIVE_D3D12_PAGEABLED3D12_H_
+
+#include "common/LinkedList.h"
+#include "common/Serial.h"
+#include "dawn_native/D3D12Backend.h"
+#include "dawn_native/d3d12/d3d12_platform.h"
+
+namespace dawn_native { namespace d3d12 {
+ // This class is used to represent ID3D12Pageable allocations, and also serves as a node within
+ // the ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
+ // allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
+ // LRU cache when it is evicted from resident memory due to budget constraints, or when the
+ // pageable allocation is released.
+ class Pageable : public LinkNode<Pageable> {
+ public:
+ Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
+ ~Pageable();
+
+ ID3D12Pageable* GetD3D12Pageable() const;
+
+ // We set mLastRecordingSerial to denote the serial this pageable was last recorded to be
+ // used. We must check this serial against the current serial when recording usages to
+ // ensure we do not process residency for this pageable multiple times.
+ Serial GetLastUsage() const;
+ void SetLastUsage(Serial serial);
+
+ // The residency manager must know the last serial that any portion of the pageable was
+ // submitted to be used so that we can ensure this pageable stays resident in memory at
+ // least until that serial has completed.
+ uint64_t GetLastSubmission() const;
+ void SetLastSubmission(Serial serial);
+
+ MemorySegment GetMemorySegment() const;
+
+ uint64_t GetSize() const;
+
+ bool IsInResidencyLRUCache() const;
+
+ // In some scenarios, such as async buffer mapping or descriptor heaps, we must lock
+ // residency to ensure the pageable cannot be evicted. Because multiple buffers may be
+ // mapped in a single heap, we must track the number of resources currently locked.
+ void IncrementResidencyLock();
+ void DecrementResidencyLock();
+ bool IsResidencyLocked() const;
+
+ protected:
+ ComPtr<ID3D12Pageable> mD3d12Pageable;
+
+ private:
+ // mLastUsage denotes the last time this pageable was recorded for use.
+ Serial mLastUsage = 0;
+ // mLastSubmission denotes the last time this pageable was submitted to the GPU. Note that
+ // although this variable often contains the same value as mLastUsage, it can differ in some
+ // situations. When some asynchronous APIs (like WriteBuffer) are called, mLastUsage is
+ // updated upon the call, but the backend operation is deferred until the next submission
+ // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
+ // accurately identify when a pageable can be evicted.
+ Serial mLastSubmission = 0;
+ MemorySegment mMemorySegment;
+ uint32_t mResidencyLockRefCount = 0;
+ uint64_t mSize = 0;
+ };
+}} // namespace dawn_native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_PAGEABLED3D12_H_
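
A note on the pattern above: locking removes the node from the LRU so eviction can never select it, and the lock is refcounted because several mapped resources may share one heap. A minimal sketch of that interaction, with illustrative names (ResidencyNode, LRUSketch) rather than Dawn's Pageable/ResidencyManager API:

    #include <cstdint>
    #include <list>

    // Sketch: an LRU where locked nodes are removed from the list so eviction
    // can never select them, and locks are refcounted.
    struct ResidencyNode {
        uint64_t size = 0;
        uint32_t lockCount = 0;
        bool inLRU = false;
    };

    class LRUSketch {
      public:
        void Lock(ResidencyNode* node) {
            if (node->inLRU) {  // A locked node must not be evictable.
                mList.remove(node);
                node->inLRU = false;
            }
            node->lockCount++;
        }
        void Unlock(ResidencyNode* node) {
            if (--node->lockCount == 0) {  // Last lock: eligible for eviction.
                mList.push_back(node);
                node->inLRU = true;
            }
        }
        ResidencyNode* EvictCandidate() {  // Least recently used, or nullptr.
            return mList.empty() ? nullptr : mList.front();
        }

      private:
        std::list<ResidencyNode*> mList;  // Front = least recently used.
    };
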
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
index 3c8a7f7b891..5e3460bb0f5 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
@@ -69,25 +69,17 @@ namespace dawn_native { namespace d3d12 {
MaybeError PipelineLayout::Initialize() {
Device* device = ToBackend(GetDevice());
- D3D12_ROOT_PARAMETER rootParameters[kMaxBindGroups * 2 + kMaxDynamicBufferCount];
-
- // A root parameter is one of these types
- union {
- D3D12_ROOT_DESCRIPTOR_TABLE DescriptorTable;
- D3D12_ROOT_CONSTANTS Constants;
- D3D12_ROOT_DESCRIPTOR Descriptor;
- } rootParameterValues[kMaxBindGroups * 2];
- // samplers must be in a separate descriptor table so we need at most twice as many tables
- // as bind groups
+            // Each root parameter has a D3D12_ROOT_PARAMETER_TYPE: a descriptor table, root
+            // constant, or root descriptor.
+ std::vector<D3D12_ROOT_PARAMETER> rootParameters;
// Ranges are D3D12_DESCRIPTOR_RANGE_TYPE_(SRV|UAV|CBV|SAMPLER)
// They are grouped together so each bind group has at most 4 ranges
D3D12_DESCRIPTOR_RANGE ranges[kMaxBindGroups * 4];
- uint32_t parameterIndex = 0;
uint32_t rangeIndex = 0;
- for (uint32_t group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
// Set the root descriptor table parameter and copy ranges. Ranges are offset by the
@@ -99,30 +91,31 @@ namespace dawn_native { namespace d3d12 {
return false;
}
- auto& rootParameter = rootParameters[parameterIndex];
+ D3D12_ROOT_PARAMETER rootParameter = {};
rootParameter.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
- rootParameter.DescriptorTable = rootParameterValues[parameterIndex].DescriptorTable;
rootParameter.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
rootParameter.DescriptorTable.NumDescriptorRanges = rangeCount;
rootParameter.DescriptorTable.pDescriptorRanges = &ranges[rangeIndex];
for (uint32_t i = 0; i < rangeCount; ++i) {
ranges[rangeIndex] = descriptorRanges[i];
- ranges[rangeIndex].RegisterSpace = group;
+ ranges[rangeIndex].RegisterSpace = static_cast<uint32_t>(group);
rangeIndex++;
}
+ rootParameters.emplace_back(rootParameter);
+
return true;
};
if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorTableSize(),
bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
- mCbvUavSrvRootParameterInfo[group] = parameterIndex++;
+ mCbvUavSrvRootParameterInfo[group] = rootParameters.size() - 1;
}
if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorTableSize(),
bindGroupLayout->GetSamplerDescriptorRanges())) {
- mSamplerRootParameterInfo[group] = parameterIndex++;
+ mSamplerRootParameterInfo[group] = rootParameters.size() - 1;
}
// Get calculated shader register for root descriptors
@@ -130,34 +123,42 @@ namespace dawn_native { namespace d3d12 {
// Init root descriptors in root signatures for dynamic buffer bindings.
// These are packed at the beginning of the layout binding info.
- for (BindingIndex dynamicBindingIndex = 0;
+ for (BindingIndex dynamicBindingIndex{0};
dynamicBindingIndex < bindGroupLayout->GetDynamicBufferCount();
++dynamicBindingIndex) {
const BindingInfo& bindingInfo =
bindGroupLayout->GetBindingInfo(dynamicBindingIndex);
- D3D12_ROOT_PARAMETER* rootParameter = &rootParameters[parameterIndex];
+ if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+ // Skip dynamic buffers that are not visible. D3D12 does not have None
+ // visibility.
+ continue;
+ }
+
+ D3D12_ROOT_PARAMETER rootParameter = {};
// Setup root descriptor.
D3D12_ROOT_DESCRIPTOR rootDescriptor;
rootDescriptor.ShaderRegister = shaderRegisters[dynamicBindingIndex];
- rootDescriptor.RegisterSpace = group;
+ rootDescriptor.RegisterSpace = static_cast<uint32_t>(group);
// Set root descriptors in root signatures.
- rootParameter->Descriptor = rootDescriptor;
- mDynamicRootParameterIndices[group][dynamicBindingIndex] = parameterIndex++;
+ rootParameter.Descriptor = rootDescriptor;
+ mDynamicRootParameterIndices[group][dynamicBindingIndex] = rootParameters.size();
// Set parameter types according to bind group layout descriptor.
- rootParameter->ParameterType = RootParameterType(bindingInfo.type);
+ rootParameter.ParameterType = RootParameterType(bindingInfo.type);
// Set visibilities according to bind group layout descriptor.
- rootParameter->ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
+ rootParameter.ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
+
+ rootParameters.emplace_back(rootParameter);
}
}
D3D12_ROOT_SIGNATURE_DESC rootSignatureDescriptor;
- rootSignatureDescriptor.NumParameters = parameterIndex;
- rootSignatureDescriptor.pParameters = rootParameters;
+ rootSignatureDescriptor.NumParameters = rootParameters.size();
+ rootSignatureDescriptor.pParameters = rootParameters.data();
rootSignatureDescriptor.NumStaticSamplers = 0;
rootSignatureDescriptor.pStaticSamplers = nullptr;
rootSignatureDescriptor.Flags =
@@ -176,13 +177,13 @@ namespace dawn_native { namespace d3d12 {
return {};
}
- uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(uint32_t group) const {
- ASSERT(group < kMaxBindGroups);
+ uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const {
+ ASSERT(group < kMaxBindGroupsTyped);
return mCbvUavSrvRootParameterInfo[group];
}
- uint32_t PipelineLayout::GetSamplerRootParameterIndex(uint32_t group) const {
- ASSERT(group < kMaxBindGroups);
+ uint32_t PipelineLayout::GetSamplerRootParameterIndex(BindGroupIndex group) const {
+ ASSERT(group < kMaxBindGroupsTyped);
return mSamplerRootParameterInfo[group];
}
@@ -190,11 +191,13 @@ namespace dawn_native { namespace d3d12 {
return mRootSignature.Get();
}
- uint32_t PipelineLayout::GetDynamicRootParameterIndex(uint32_t group,
+ uint32_t PipelineLayout::GetDynamicRootParameterIndex(BindGroupIndex group,
BindingIndex bindingIndex) const {
- ASSERT(group < kMaxBindGroups);
- ASSERT(bindingIndex < kMaxBindingsPerGroup);
+ ASSERT(group < kMaxBindGroupsTyped);
+ ASSERT(bindingIndex < kMaxBindingsPerGroupTyped);
ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).hasDynamicOffset);
+ ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).visibility !=
+ wgpu::ShaderStage::None);
return mDynamicRootParameterIndices[group][bindingIndex];
}
}} // namespace dawn_native::d3d12
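
One subtlety in the rewrite above: each descriptor-table parameter stores a pointer into the fixed-size `ranges` array, so those pointers stay valid even though `rootParameters` may reallocate as it grows; `.data()` is only read once the vector is final. A minimal sketch of that shape (BuildDesc is a hypothetical helper; the caller must keep `params` and `ranges` alive until the descriptor is consumed):

    #include <d3d12.h>
    #include <vector>

    // Sketch: build a root signature description from a growable parameter
    // list. Each table parameter points into 'ranges', which must be stable
    // storage (the diff uses a fixed-size stack array for exactly this
    // reason); the vector may reallocate freely until .data() is read.
    D3D12_ROOT_SIGNATURE_DESC BuildDesc(std::vector<D3D12_ROOT_PARAMETER>& params,
                                        const D3D12_DESCRIPTOR_RANGE* ranges,
                                        UINT rangeCount) {
        D3D12_ROOT_PARAMETER table = {};
        table.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
        table.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
        table.DescriptorTable.NumDescriptorRanges = rangeCount;
        table.DescriptorTable.pDescriptorRanges = ranges;  // stable storage
        params.push_back(table);

        D3D12_ROOT_SIGNATURE_DESC desc = {};
        desc.NumParameters = static_cast<UINT>(params.size());
        desc.pParameters = params.data();  // valid while 'params' is alive
        return desc;
    }
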
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
index 5bcc1addac5..8de41b06773 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
@@ -15,6 +15,7 @@
#ifndef DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
#define DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
+#include "common/ityp_array.h"
#include "dawn_native/BindingInfo.h"
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/d3d12/d3d12_platform.h"
@@ -28,11 +29,12 @@ namespace dawn_native { namespace d3d12 {
static ResultOrError<PipelineLayout*> Create(Device* device,
const PipelineLayoutDescriptor* descriptor);
- uint32_t GetCbvUavSrvRootParameterIndex(uint32_t group) const;
- uint32_t GetSamplerRootParameterIndex(uint32_t group) const;
+ uint32_t GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const;
+ uint32_t GetSamplerRootParameterIndex(BindGroupIndex group) const;
// Returns the index of the root parameter reserved for a dynamic buffer binding
- uint32_t GetDynamicRootParameterIndex(uint32_t group, BindingIndex bindingIndex) const;
+ uint32_t GetDynamicRootParameterIndex(BindGroupIndex group,
+ BindingIndex bindingIndex) const;
ID3D12RootSignature* GetRootSignature() const;
@@ -40,9 +42,11 @@ namespace dawn_native { namespace d3d12 {
~PipelineLayout() override = default;
using PipelineLayoutBase::PipelineLayoutBase;
MaybeError Initialize();
- std::array<uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
- std::array<uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
- std::array<std::array<uint32_t, kMaxBindingsPerGroup>, kMaxBindGroups>
+ ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
+ ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
+ ityp::array<BindGroupIndex,
+ ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup>,
+ kMaxBindGroups>
mDynamicRootParameterIndices;
ComPtr<ID3D12RootSignature> mRootSignature;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
index 82fdedfad9c..f629e76f261 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
@@ -310,50 +310,41 @@ namespace dawn_native { namespace d3d12 {
D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
- PerStage<ComPtr<ID3DBlob>> compiledShader;
- ComPtr<ID3DBlob> errors;
+ PerStage<const char*> entryPoints;
+ entryPoints[SingleShaderStage::Vertex] = descriptor->vertexStage.entryPoint;
+ entryPoints[SingleShaderStage::Fragment] = descriptor->fragmentStage->entryPoint;
+
+ PerStage<ShaderModule*> modules;
+ modules[SingleShaderStage::Vertex] = ToBackend(descriptor->vertexStage.module);
+ modules[SingleShaderStage::Fragment] = ToBackend(descriptor->fragmentStage->module);
+
+ PerStage<D3D12_SHADER_BYTECODE*> shaders;
+ shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
+ shaders[SingleShaderStage::Fragment] = &descriptorD3D12.PS;
+
+ PerStage<ComPtr<ID3DBlob>> compiledFXCShader;
+ PerStage<ComPtr<IDxcBlob>> compiledDXCShader;
wgpu::ShaderStage renderStages = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
for (auto stage : IterateStages(renderStages)) {
- ShaderModule* module = nullptr;
- const char* entryPoint = nullptr;
- const char* compileTarget = nullptr;
- D3D12_SHADER_BYTECODE* shader = nullptr;
- switch (stage) {
- case SingleShaderStage::Vertex:
- module = ToBackend(descriptor->vertexStage.module);
- entryPoint = descriptor->vertexStage.entryPoint;
- shader = &descriptorD3D12.VS;
- compileTarget = "vs_5_1";
- break;
- case SingleShaderStage::Fragment:
- module = ToBackend(descriptor->fragmentStage->module);
- entryPoint = descriptor->fragmentStage->entryPoint;
- shader = &descriptorD3D12.PS;
- compileTarget = "ps_5_1";
- break;
- default:
- UNREACHABLE();
- break;
- }
std::string hlslSource;
- DAWN_TRY_ASSIGN(hlslSource, module->GetHLSLSource(ToBackend(GetLayout())));
-
- const PlatformFunctions* functions = device->GetFunctions();
- MaybeError error = CheckHRESULT(
- functions->d3dCompile(hlslSource.c_str(), hlslSource.length(), nullptr, nullptr,
- nullptr, entryPoint, compileTarget, compileFlags, 0,
- &compiledShader[stage], &errors),
- "D3DCompile");
- if (error.IsError()) {
- dawn::WarningLog() << reinterpret_cast<char*>(errors->GetBufferPointer());
- DAWN_TRY(std::move(error));
- }
+ DAWN_TRY_ASSIGN(hlslSource, modules[stage]->GetHLSLSource(ToBackend(GetLayout())));
+
+ if (device->IsToggleEnabled(Toggle::UseDXC)) {
+ DAWN_TRY_ASSIGN(compiledDXCShader[stage],
+ modules[stage]->CompileShaderDXC(stage, hlslSource,
+ entryPoints[stage], compileFlags));
+
+ shaders[stage]->pShaderBytecode = compiledDXCShader[stage]->GetBufferPointer();
+ shaders[stage]->BytecodeLength = compiledDXCShader[stage]->GetBufferSize();
+ } else {
+ DAWN_TRY_ASSIGN(compiledFXCShader[stage],
+ modules[stage]->CompileShaderFXC(stage, hlslSource,
+ entryPoints[stage], compileFlags));
- if (shader != nullptr) {
- shader->pShaderBytecode = compiledShader[stage]->GetBufferPointer();
- shader->BytecodeLength = compiledShader[stage]->GetBufferSize();
+ shaders[stage]->pShaderBytecode = compiledFXCShader[stage]->GetBufferPointer();
+ shaders[stage]->BytecodeLength = compiledFXCShader[stage]->GetBufferSize();
}
}
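
The switch-per-stage block is replaced by PerStage<T> lookup tables so the compile loop body is identical for the vertex and fragment stages. A rough sketch of the indexing idea, using a plain std::array rather than Dawn's actual PerStage template:

    #include <array>
    #include <cstddef>

    // Sketch: a tiny PerStage-style container keyed by a stage enum. Dawn's
    // real PerStage<T> also iterates over a stage bitmask; this shows only
    // the indexing that lets one loop body replace a per-stage switch.
    enum class Stage : size_t { Vertex = 0, Fragment = 1, Count = 2 };

    template <typename T>
    class PerStageSketch {
      public:
        T& operator[](Stage s) { return mData[static_cast<size_t>(s)]; }

      private:
        std::array<T, static_cast<size_t>(Stage::Count)> mData = {};
    };

    // Usage: fill the tables once, then compile both stages with one loop body.
    //   PerStageSketch<const char*> entryPoints;
    //   entryPoints[Stage::Vertex] = "vs_main";
    //   entryPoints[Stage::Fragment] = "fs_main";
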
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
index c65d56a2422..053a1690fdb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
@@ -30,49 +30,49 @@ namespace dawn_native { namespace d3d12 {
}
// Increments number of locks on a heap to ensure the heap remains resident.
- MaybeError ResidencyManager::LockHeap(Heap* heap) {
+ MaybeError ResidencyManager::LockAllocation(Pageable* pageable) {
if (!mResidencyManagementEnabled) {
return {};
}
// If the heap isn't already resident, make it resident.
- if (!heap->IsInResidencyLRUCache() && !heap->IsResidencyLocked()) {
- DAWN_TRY(EnsureCanMakeResident(heap->GetSize(),
- GetMemorySegmentInfo(heap->GetMemorySegment())));
- ID3D12Pageable* pageable = heap->GetD3D12Pageable();
- DAWN_TRY(CheckHRESULT(mDevice->GetD3D12Device()->MakeResident(1, &pageable),
- "Making a scheduled-to-be-used resource resident"));
+ if (!pageable->IsInResidencyLRUCache() && !pageable->IsResidencyLocked()) {
+ ID3D12Pageable* d3d12Pageable = pageable->GetD3D12Pageable();
+ uint64_t size = pageable->GetSize();
+
+ DAWN_TRY(MakeAllocationsResident(GetMemorySegmentInfo(pageable->GetMemorySegment()),
+ size, 1, &d3d12Pageable));
}
// Since we can't evict the heap, it's unnecessary to track the heap in the LRU Cache.
- if (heap->IsInResidencyLRUCache()) {
- heap->RemoveFromList();
+ if (pageable->IsInResidencyLRUCache()) {
+ pageable->RemoveFromList();
}
- heap->IncrementResidencyLock();
+ pageable->IncrementResidencyLock();
return {};
}
// Decrements number of locks on a heap. When the number of locks becomes zero, the heap is
// inserted into the LRU cache and becomes eligible for eviction.
- void ResidencyManager::UnlockHeap(Heap* heap) {
+ void ResidencyManager::UnlockAllocation(Pageable* pageable) {
if (!mResidencyManagementEnabled) {
return;
}
- ASSERT(heap->IsResidencyLocked());
- ASSERT(!heap->IsInResidencyLRUCache());
- heap->DecrementResidencyLock();
+ ASSERT(pageable->IsResidencyLocked());
+ ASSERT(!pageable->IsInResidencyLRUCache());
+ pageable->DecrementResidencyLock();
// If another lock still exists on the heap, nothing further should be done.
- if (heap->IsResidencyLocked()) {
+ if (pageable->IsResidencyLocked()) {
return;
}
// When all locks have been removed, the resource remains resident and becomes tracked in
// the corresponding LRU.
- TrackResidentAllocation(heap);
+ TrackResidentAllocation(pageable);
}
// Returns the appropriate MemorySegmentInfo for a given MemorySegment.
@@ -144,12 +144,19 @@ namespace dawn_native { namespace d3d12 {
// Removes a heap from the LRU and returns the least recently used heap when possible. Returns
// nullptr when nothing further can be evicted.
- ResultOrError<Heap*> ResidencyManager::RemoveSingleEntryFromLRU(
+ ResultOrError<Pageable*> ResidencyManager::RemoveSingleEntryFromLRU(
MemorySegmentInfo* memorySegment) {
- ASSERT(!memorySegment->lruCache.empty());
- Heap* heap = memorySegment->lruCache.head()->value();
+ // If the LRU is empty, return nullptr to allow execution to continue. Note that fully
+ // emptying the LRU is undesirable, because it can mean either 1) the LRU is not accurately
+ // accounting for Dawn's GPU allocations, or 2) a component external to Dawn is using all of
+        // the process budget and starving Dawn, which will cause thrashing.
+ if (memorySegment->lruCache.empty()) {
+ return nullptr;
+ }
+
+ Pageable* pageable = memorySegment->lruCache.head()->value();
- Serial lastSubmissionSerial = heap->GetLastSubmission();
+ Serial lastSubmissionSerial = pageable->GetLastSubmission();
// If the next candidate for eviction was inserted into the LRU during the current serial,
// it is because more memory is being used in a single command list than is available.
@@ -164,8 +171,8 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY(mDevice->WaitForSerial(lastSubmissionSerial));
}
- heap->RemoveFromList();
- return heap;
+ pageable->RemoveFromList();
+ return pageable;
}
MaybeError ResidencyManager::EnsureCanAllocate(uint64_t allocationSize,
@@ -174,14 +181,19 @@ namespace dawn_native { namespace d3d12 {
return {};
}
- return EnsureCanMakeResident(allocationSize, GetMemorySegmentInfo(memorySegment));
+ uint64_t bytesEvicted;
+ DAWN_TRY_ASSIGN(bytesEvicted,
+ EnsureCanMakeResident(allocationSize, GetMemorySegmentInfo(memorySegment)));
+
+ return {};
}
// Any time we need to make something resident, we must check that we have enough free memory to
// make the new object resident while also staying within budget. If there isn't enough
- // memory, we should evict until there is.
- MaybeError ResidencyManager::EnsureCanMakeResident(uint64_t sizeToMakeResident,
- MemorySegmentInfo* memorySegment) {
+ // memory, we should evict until there is. Returns the number of bytes evicted.
+ ResultOrError<uint64_t> ResidencyManager::EnsureCanMakeResident(
+ uint64_t sizeToMakeResident,
+ MemorySegmentInfo* memorySegment) {
ASSERT(mResidencyManagementEnabled);
UpdateMemorySegmentInfo(memorySegment);
@@ -190,23 +202,23 @@ namespace dawn_native { namespace d3d12 {
// Return when we can call MakeResident and remain under budget.
if (memoryUsageAfterMakeResident < memorySegment->budget) {
- return {};
+ return 0;
}
std::vector<ID3D12Pageable*> resourcesToEvict;
uint64_t sizeNeededToBeUnderBudget = memoryUsageAfterMakeResident - memorySegment->budget;
uint64_t sizeEvicted = 0;
while (sizeEvicted < sizeNeededToBeUnderBudget) {
- Heap* heap;
- DAWN_TRY_ASSIGN(heap, RemoveSingleEntryFromLRU(memorySegment));
+ Pageable* pageable;
+ DAWN_TRY_ASSIGN(pageable, RemoveSingleEntryFromLRU(memorySegment));
// If no heap was returned, then nothing more can be evicted.
- if (heap == nullptr) {
+ if (pageable == nullptr) {
break;
}
- sizeEvicted += heap->GetSize();
- resourcesToEvict.push_back(heap->GetD3D12Pageable());
+ sizeEvicted += pageable->GetSize();
+ resourcesToEvict.push_back(pageable->GetD3D12Pageable());
}
if (resourcesToEvict.size() > 0) {
@@ -215,7 +227,7 @@ namespace dawn_native { namespace d3d12 {
"Evicting resident heaps to free memory"));
}
- return {};
+ return sizeEvicted;
}
// Given a list of heaps that are pending usage, this function will estimate memory needed,
@@ -226,7 +238,8 @@ namespace dawn_native { namespace d3d12 {
return {};
}
- std::vector<ID3D12Pageable*> heapsToMakeResident;
+ std::vector<ID3D12Pageable*> localHeapsToMakeResident;
+ std::vector<ID3D12Pageable*> nonLocalHeapsToMakeResident;
uint64_t localSizeToMakeResident = 0;
uint64_t nonLocalSizeToMakeResident = 0;
@@ -244,11 +257,12 @@ namespace dawn_native { namespace d3d12 {
// update its position in the LRU.
heap->RemoveFromList();
} else {
- heapsToMakeResident.push_back(heap->GetD3D12Pageable());
if (heap->GetMemorySegment() == MemorySegment::Local) {
localSizeToMakeResident += heap->GetSize();
+ localHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
} else {
nonLocalSizeToMakeResident += heap->GetSize();
+ nonLocalHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
}
}
@@ -263,37 +277,73 @@ namespace dawn_native { namespace d3d12 {
}
if (localSizeToMakeResident > 0) {
- DAWN_TRY(EnsureCanMakeResident(localSizeToMakeResident, &mVideoMemoryInfo.local));
+ return MakeAllocationsResident(&mVideoMemoryInfo.local, localSizeToMakeResident,
+ localHeapsToMakeResident.size(),
+ localHeapsToMakeResident.data());
}
if (nonLocalSizeToMakeResident > 0) {
ASSERT(!mDevice->GetDeviceInfo().isUMA);
- DAWN_TRY(EnsureCanMakeResident(nonLocalSizeToMakeResident, &mVideoMemoryInfo.nonLocal));
+ return MakeAllocationsResident(&mVideoMemoryInfo.nonLocal, nonLocalSizeToMakeResident,
+ nonLocalHeapsToMakeResident.size(),
+ nonLocalHeapsToMakeResident.data());
}
- if (heapsToMakeResident.size() != 0) {
- // Note that MakeResident is a synchronous function and can add a significant
- // overhead to command recording. In the future, it may be possible to decrease this
- // overhead by using MakeResident on a secondary thread, or by instead making use of
- // the EnqueueMakeResident function (which is not available on all Windows 10
- // platforms).
- DAWN_TRY(CheckHRESULT(mDevice->GetD3D12Device()->MakeResident(
- heapsToMakeResident.size(), heapsToMakeResident.data()),
- "Making scheduled-to-be-used resources resident"));
+ return {};
+ }
+
+ MaybeError ResidencyManager::MakeAllocationsResident(MemorySegmentInfo* segment,
+ uint64_t sizeToMakeResident,
+ uint64_t numberOfObjectsToMakeResident,
+ ID3D12Pageable** allocations) {
+ uint64_t bytesEvicted;
+ DAWN_TRY_ASSIGN(bytesEvicted, EnsureCanMakeResident(sizeToMakeResident, segment));
+
+ // Note that MakeResident is a synchronous function and can add a significant
+ // overhead to command recording. In the future, it may be possible to decrease this
+ // overhead by using MakeResident on a secondary thread, or by instead making use of
+ // the EnqueueMakeResident function (which is not available on all Windows 10
+ // platforms).
+ HRESULT hr =
+ mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
+
+ // A MakeResident call can fail if there's not enough available memory. This
+ // could occur when there's significant fragmentation or if the allocation size
+ // estimates are incorrect. We may be able to continue execution by evicting some
+ // more memory and calling MakeResident again.
+ while (FAILED(hr)) {
+            constexpr uint32_t kAdditionalSizeToEvict = 50000000; // 50MB
+
+ uint64_t sizeEvicted = 0;
+
+            DAWN_TRY_ASSIGN(sizeEvicted, EnsureCanMakeResident(kAdditionalSizeToEvict, segment));
+
+ // If nothing can be evicted after MakeResident has failed, we cannot continue
+ // execution and must throw a fatal error.
+ if (sizeEvicted == 0) {
+ return DAWN_OUT_OF_MEMORY_ERROR(
+ "MakeResident has failed due to excessive video memory usage.");
+ }
+
+ hr =
+ mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
}
return {};
}
// Inserts a heap at the bottom of the LRU. The passed heap must be resident or scheduled to
- // become resident within the current serial.
- void ResidencyManager::TrackResidentAllocation(Heap* heap) {
+ // become resident within the current serial. Failing to call this function when an allocation
+ // is implicitly made resident will cause the residency manager to view the allocation as
+    // non-resident and call MakeResident, which would put D3D12's internal residency refcount on
+    // the allocation out of sync with Dawn's.
+ void ResidencyManager::TrackResidentAllocation(Pageable* pageable) {
if (!mResidencyManagementEnabled) {
return;
}
- ASSERT(heap->IsInList() == false);
- GetMemorySegmentInfo(heap->GetMemorySegment())->lruCache.Append(heap);
+ ASSERT(pageable->IsInList() == false);
+ GetMemorySegmentInfo(pageable->GetMemorySegment())->lruCache.Append(pageable);
}
    // Places an artificial cap on Dawn's budget so we can test in a predictable manner. If used,
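
The MakeAllocationsResident loop above evicts a fixed 50MB chunk and retries until MakeResident succeeds or eviction stops making progress. The same control flow as a compact sketch, with hypothetical callbacks standing in for ID3D12Device::MakeResident and EnsureCanMakeResident:

    #include <cstdint>
    #include <functional>

    // Sketch of the retry shape: evict a fixed chunk and try again until the
    // call succeeds or eviction makes no progress.
    bool MakeResidentWithRetry(const std::function<bool()>& makeResident,
                               const std::function<uint64_t(uint64_t)>& evictAtLeast) {
        constexpr uint64_t kChunk = 50000000;  // 50MB, matching the diff
        while (!makeResident()) {
            if (evictAtLeast(kChunk) == 0) {
                return false;  // Nothing left to evict: genuine OOM.
            }
        }
        return true;
    }
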
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
index abd6add72e2..304a211b084 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
@@ -27,13 +27,14 @@ namespace dawn_native { namespace d3d12 {
class Device;
class Heap;
+ class Pageable;
class ResidencyManager {
public:
ResidencyManager(Device* device);
- MaybeError LockHeap(Heap* heap);
- void UnlockHeap(Heap* heap);
+ MaybeError LockAllocation(Pageable* pageable);
+ void UnlockAllocation(Pageable* pageable);
MaybeError EnsureCanAllocate(uint64_t allocationSize, MemorySegment memorySegment);
MaybeError EnsureHeapsAreResident(Heap** heaps, size_t heapCount);
@@ -41,14 +42,14 @@ namespace dawn_native { namespace d3d12 {
uint64_t SetExternalMemoryReservation(MemorySegment segment,
uint64_t requestedReservationSize);
- void TrackResidentAllocation(Heap* heap);
+ void TrackResidentAllocation(Pageable* pageable);
void RestrictBudgetForTesting(uint64_t artificialBudgetCap);
private:
struct MemorySegmentInfo {
const DXGI_MEMORY_SEGMENT_GROUP dxgiSegment;
- LinkedList<Heap> lruCache = {};
+ LinkedList<Pageable> lruCache = {};
uint64_t budget = 0;
uint64_t usage = 0;
uint64_t externalReservation = 0;
@@ -61,8 +62,13 @@ namespace dawn_native { namespace d3d12 {
};
MemorySegmentInfo* GetMemorySegmentInfo(MemorySegment memorySegment);
- MaybeError EnsureCanMakeResident(uint64_t allocationSize, MemorySegmentInfo* memorySegment);
- ResultOrError<Heap*> RemoveSingleEntryFromLRU(MemorySegmentInfo* memorySegment);
+ ResultOrError<uint64_t> EnsureCanMakeResident(uint64_t allocationSize,
+ MemorySegmentInfo* memorySegment);
+ ResultOrError<Pageable*> RemoveSingleEntryFromLRU(MemorySegmentInfo* memorySegment);
+ MaybeError MakeAllocationsResident(MemorySegmentInfo* segment,
+ uint64_t sizeToMakeResident,
+ uint64_t numberOfObjectsToMakeResident,
+ ID3D12Pageable** allocations);
void UpdateVideoMemoryInfo();
void UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo);
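
The budget check behind EnsureCanMakeResident is plain arithmetic: project the usage after the MakeResident call and evict at least the overshoot. A worked example with made-up numbers:

    #include <cstdint>

    // Worked example (numbers are made up): usage = 900 MB, budget = 1000 MB,
    // sizeToMakeResident = 300 MB. Projected usage is 1200 MB, 200 MB over
    // budget, so eviction must free at least 200 MB before MakeResident.
    uint64_t BytesToEvict(uint64_t usage, uint64_t budget, uint64_t sizeToMakeResident) {
        uint64_t projected = usage + sizeToMakeResident;
        return projected < budget ? 0 : projected - budget;
    }
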
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
index 7f4738649ee..4e48f2edafc 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
@@ -185,8 +185,12 @@ namespace dawn_native { namespace d3d12 {
ResourceHeapAllocation directAllocation;
DAWN_TRY_ASSIGN(directAllocation,
CreateCommittedResource(heapType, resourceDescriptor, initialUsage));
+ if (directAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+ return std::move(directAllocation);
+ }
- return std::move(directAllocation);
+ // If direct allocation fails, the system is probably out of memory.
+ return DAWN_OUT_OF_MEMORY_ERROR("Allocation failed");
}
void ResourceAllocatorManager::Tick(Serial completedSerial) {
@@ -282,7 +286,7 @@ namespace dawn_native { namespace d3d12 {
// Before calling CreatePlacedResource, we must ensure the target heap is resident.
// CreatePlacedResource will fail if it is not.
- DAWN_TRY(mDevice->GetResidencyManager()->LockHeap(heap));
+ DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(heap));
// With placed resources, a single heap can be reused.
// The resource placed at an offset is only reclaimed
@@ -300,7 +304,7 @@ namespace dawn_native { namespace d3d12 {
// After CreatePlacedResource has finished, the heap can be unlocked from residency. This
// will insert it into the residency LRU.
- mDevice->GetResidencyManager()->UnlockHeap(heap);
+ mDevice->GetResidencyManager()->UnlockAllocation(heap);
return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
std::move(placedResource), heap};
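
The allocation path above now distinguishes "sub-allocation missed" from "direct allocation failed": only the latter is reported as out-of-memory. A sketch of that fallback chain (Allocation, subAllocate, and allocateDirect are illustrative stand-ins, not Dawn types):

    #include <optional>

    // Sketch: try the sub-allocator first, then a committed (direct)
    // allocation, and only fail when both come back invalid.
    struct Allocation {
        bool valid = false;
    };

    std::optional<Allocation> AllocateWithFallback(Allocation (*subAllocate)(),
                                                   Allocation (*allocateDirect)()) {
        Allocation a = subAllocate();
        if (a.valid) {
            return a;
        }
        a = allocateDirect();
        if (a.valid) {
            return a;
        }
        return std::nullopt;  // Both paths failed: report out-of-memory.
    }
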
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp
new file mode 100644
index 00000000000..224051a1c42
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp
@@ -0,0 +1,167 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/SamplerHeapCacheD3D12.h"
+
+#include "common/Assert.h"
+#include "common/HashUtils.h"
+#include "dawn_native/d3d12/BindGroupD3D12.h"
+#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/Forward.h"
+#include "dawn_native/d3d12/SamplerD3D12.h"
+#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ SamplerHeapCacheEntry::SamplerHeapCacheEntry(std::vector<Sampler*> samplers)
+ : mSamplers(std::move(samplers)) {
+ }
+
+ SamplerHeapCacheEntry::SamplerHeapCacheEntry(SamplerHeapCache* cache,
+ StagingDescriptorAllocator* allocator,
+ std::vector<Sampler*> samplers,
+ CPUDescriptorHeapAllocation allocation)
+ : mCPUAllocation(std::move(allocation)),
+ mSamplers(std::move(samplers)),
+ mAllocator(allocator),
+ mCache(cache) {
+ ASSERT(mCache != nullptr);
+ ASSERT(mCPUAllocation.IsValid());
+ ASSERT(!mSamplers.empty());
+ }
+
+ std::vector<Sampler*>&& SamplerHeapCacheEntry::AcquireSamplers() {
+ return std::move(mSamplers);
+ }
+
+ SamplerHeapCacheEntry::~SamplerHeapCacheEntry() {
+        // If this is a blueprint, it owns no CPU allocation and there is no cache entry to remove.
+ if (mCPUAllocation.IsValid()) {
+ mCache->RemoveCacheEntry(this);
+ mAllocator->Deallocate(&mCPUAllocation);
+ }
+
+ ASSERT(!mCPUAllocation.IsValid());
+ }
+
+ bool SamplerHeapCacheEntry::Populate(Device* device,
+ ShaderVisibleDescriptorAllocator* allocator) {
+ if (allocator->IsAllocationStillValid(mGPUAllocation)) {
+ return true;
+ }
+
+ ASSERT(!mSamplers.empty());
+
+ // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+        // If it fails, return early so the heaps can be re-allocated and switched.
+ const uint32_t descriptorCount = mSamplers.size();
+ D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
+ if (!allocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
+ &baseCPUDescriptor, &mGPUAllocation)) {
+ return false;
+ }
+
+ // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
+ // simple copies per bindgroup, a single non-simple copy could be issued.
+ // TODO(dawn:155): Consider doing this optimization.
+ device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
+ mCPUAllocation.GetBaseDescriptor(),
+ D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+
+ return true;
+ }
+
+ D3D12_GPU_DESCRIPTOR_HANDLE SamplerHeapCacheEntry::GetBaseDescriptor() const {
+ return mGPUAllocation.GetBaseDescriptor();
+ }
+
+ ResultOrError<Ref<SamplerHeapCacheEntry>> SamplerHeapCache::GetOrCreate(
+ const BindGroup* group,
+ StagingDescriptorAllocator* samplerAllocator) {
+ const BindGroupLayout* bgl = ToBackend(group->GetLayout());
+
+ // If a previously created bindgroup used the same samplers, the backing sampler heap
+        // allocation can be reused. The packed list of samplers acts as the key to look up the
+ // allocation in a cache.
+ // TODO(dawn:155): Avoid re-allocating the vector each lookup.
+ std::vector<Sampler*> samplers;
+ samplers.reserve(bgl->GetSamplerDescriptorCount());
+
+ for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
+ bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+ if (bindingInfo.type == wgpu::BindingType::Sampler ||
+ bindingInfo.type == wgpu::BindingType::ComparisonSampler) {
+ samplers.push_back(ToBackend(group->GetBindingAsSampler(bindingIndex)));
+ }
+ }
+
+        // Check the cache for an existing sampler heap allocation that corresponds to the
+ // samplers.
+ SamplerHeapCacheEntry blueprint(std::move(samplers));
+ auto iter = mCache.find(&blueprint);
+ if (iter != mCache.end()) {
+ return Ref<SamplerHeapCacheEntry>(*iter);
+ }
+
+ // Steal the sampler vector back from the blueprint to avoid creating a new copy for the
+ // real entry below.
+ samplers = std::move(blueprint.AcquireSamplers());
+
+ CPUDescriptorHeapAllocation allocation;
+ DAWN_TRY_ASSIGN(allocation, samplerAllocator->AllocateCPUDescriptors());
+
+ const uint32_t samplerSizeIncrement = samplerAllocator->GetSizeIncrement();
+ ID3D12Device* d3d12Device = mDevice->GetD3D12Device();
+
+ for (uint32_t i = 0; i < samplers.size(); ++i) {
+ const auto& samplerDesc = samplers[i]->GetSamplerDescriptor();
+ d3d12Device->CreateSampler(&samplerDesc,
+ allocation.OffsetFrom(samplerSizeIncrement, i));
+ }
+
+ Ref<SamplerHeapCacheEntry> entry = AcquireRef(new SamplerHeapCacheEntry(
+ this, samplerAllocator, std::move(samplers), std::move(allocation)));
+ mCache.insert(entry.Get());
+ return std::move(entry);
+ }
+
+ SamplerHeapCache::SamplerHeapCache(Device* device) : mDevice(device) {
+ }
+
+ SamplerHeapCache::~SamplerHeapCache() {
+ ASSERT(mCache.empty());
+ }
+
+ void SamplerHeapCache::RemoveCacheEntry(SamplerHeapCacheEntry* entry) {
+ ASSERT(entry->GetRefCountForTesting() == 0);
+ size_t removedCount = mCache.erase(entry);
+ ASSERT(removedCount == 1);
+ }
+
+ size_t SamplerHeapCacheEntry::HashFunc::operator()(const SamplerHeapCacheEntry* entry) const {
+ size_t hash = 0;
+ for (const Sampler* sampler : entry->mSamplers) {
+ HashCombine(&hash, sampler);
+ }
+ return hash;
+ }
+
+ bool SamplerHeapCacheEntry::EqualityFunc::operator()(const SamplerHeapCacheEntry* a,
+ const SamplerHeapCacheEntry* b) const {
+ return a->mSamplers == b->mSamplers;
+ }
+}} // namespace dawn_native::d3d12
\ No newline at end of file
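
GetOrCreate probes the cache with a stack-allocated "blueprint" entry so a hit costs no heap allocation, and on a miss it steals the key vector back instead of copying it. The same pattern in isolation, with a stand-in Entry type instead of SamplerHeapCacheEntry:

    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    // Sketch of the blueprint-key pattern: probe the set with a temporary
    // stack entry, and on a miss move its contents into the real entry so
    // the key vector is never copied.
    struct Entry {
        std::vector<int> key;  // stand-in for the packed sampler pointers
        struct Hash {
            size_t operator()(const Entry* e) const {
                size_t h = 0;
                for (int v : e->key) {
                    h = h * 31 + static_cast<size_t>(v);  // simple combine
                }
                return h;
            }
        };
        struct Eq {
            bool operator()(const Entry* a, const Entry* b) const {
                return a->key == b->key;
            }
        };
    };

    Entry* GetOrCreate(std::unordered_set<Entry*, Entry::Hash, Entry::Eq>& cache,
                       std::vector<int> key) {
        Entry blueprint{std::move(key)};
        auto it = cache.find(&blueprint);
        if (it != cache.end()) {
            return *it;  // Cache hit: no allocation happened.
        }
        // Miss: steal the key back from the blueprint and build the real
        // entry (ownership/refcounting is elided in this sketch).
        Entry* entry = new Entry{std::move(blueprint.key)};
        cache.insert(entry);
        return entry;
    }
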
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h
new file mode 100644
index 00000000000..2f41086c8b2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h
@@ -0,0 +1,108 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
+#define DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
+
+#include "common/RefCounted.h"
+#include "dawn_native/BindingInfo.h"
+#include "dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+#include "dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+
+#include <unordered_set>
+#include <vector>
+
+// |SamplerHeapCacheEntry| maintains a cache of sampler descriptor heap allocations.
+// Each entry represents one or more sampler descriptors that co-exist in a CPU and
+// GPU descriptor heap. The CPU-side allocation is deallocated once the final reference
+// has been released while the GPU-side allocation is deallocated when the GPU is finished.
+//
+// The BindGroupLayout hands out these entries upon constructing the bindgroup. If an entry's
+// allocation is no longer valid, it is re-allocated and initialized so it may be reused by
+// another bindgroup.
+//
+// The cache is primarily needed for the GPU sampler heap, which is much smaller than the view
+// heap and where heap switches incur expensive pipeline flushes.
+namespace dawn_native { namespace d3d12 {
+
+ class BindGroup;
+ class Device;
+ class Sampler;
+ class SamplerHeapCache;
+ class StagingDescriptorAllocator;
+ class ShaderVisibleDescriptorAllocator;
+
+ // Wraps sampler descriptor heap allocations in a cache.
+ class SamplerHeapCacheEntry : public RefCounted {
+ public:
+ SamplerHeapCacheEntry() = default;
+ SamplerHeapCacheEntry(std::vector<Sampler*> samplers);
+ SamplerHeapCacheEntry(SamplerHeapCache* cache,
+ StagingDescriptorAllocator* allocator,
+ std::vector<Sampler*> samplers,
+ CPUDescriptorHeapAllocation allocation);
+ ~SamplerHeapCacheEntry() override;
+
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+
+ std::vector<Sampler*>&& AcquireSamplers();
+
+ bool Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator);
+
+        // Functors necessary for the unordered_set<SamplerHeapCacheEntry*>-based cache.
+ struct HashFunc {
+ size_t operator()(const SamplerHeapCacheEntry* entry) const;
+ };
+
+ struct EqualityFunc {
+ bool operator()(const SamplerHeapCacheEntry* a, const SamplerHeapCacheEntry* b) const;
+ };
+
+ private:
+ CPUDescriptorHeapAllocation mCPUAllocation;
+ GPUDescriptorHeapAllocation mGPUAllocation;
+
+        // Store raw pointers: sampler objects are deduplicated by the device, so pointer
+        // identity is sufficient for hashing and equality.
+ std::vector<Sampler*> mSamplers;
+
+ StagingDescriptorAllocator* mAllocator = nullptr;
+ SamplerHeapCache* mCache = nullptr;
+ };
+
+ // Cache descriptor heap allocations so that we don't create duplicate ones for every
+ // BindGroup.
+ class SamplerHeapCache {
+ public:
+ SamplerHeapCache(Device* device);
+ ~SamplerHeapCache();
+
+ ResultOrError<Ref<SamplerHeapCacheEntry>> GetOrCreate(
+ const BindGroup* group,
+ StagingDescriptorAllocator* samplerAllocator);
+
+ void RemoveCacheEntry(SamplerHeapCacheEntry* entry);
+
+ private:
+ Device* mDevice;
+
+ using Cache = std::unordered_set<SamplerHeapCacheEntry*,
+ SamplerHeapCacheEntry::HashFunc,
+ SamplerHeapCacheEntry::EqualityFunc>;
+
+ Cache mCache;
+ };
+
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
index d6410aa76da..770a006fb42 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
@@ -16,14 +16,68 @@
#include "common/Assert.h"
#include "common/BitSetIterator.h"
+#include "common/Log.h"
#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn_native/d3d12/PlatformFunctions.h"
+#include "dawn_native/d3d12/UtilsD3D12.h"
+
+#include <d3dcompiler.h>
#include <spirv_hlsl.hpp>
namespace dawn_native { namespace d3d12 {
+ namespace {
+ std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags) {
+ std::vector<const wchar_t*> arguments;
+ if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
+ arguments.push_back(L"/Gec");
+ }
+ if (compileFlags & D3DCOMPILE_IEEE_STRICTNESS) {
+ arguments.push_back(L"/Gis");
+ }
+ if (compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2) {
+ switch (compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2) {
+ case D3DCOMPILE_OPTIMIZATION_LEVEL0:
+ arguments.push_back(L"/O0");
+ break;
+ case D3DCOMPILE_OPTIMIZATION_LEVEL2:
+ arguments.push_back(L"/O2");
+ break;
+ case D3DCOMPILE_OPTIMIZATION_LEVEL3:
+ arguments.push_back(L"/O3");
+ break;
+ }
+ }
+ if (compileFlags & D3DCOMPILE_DEBUG) {
+ arguments.push_back(L"/Zi");
+ }
+ if (compileFlags & D3DCOMPILE_PACK_MATRIX_ROW_MAJOR) {
+ arguments.push_back(L"/Zpr");
+ }
+ if (compileFlags & D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR) {
+ arguments.push_back(L"/Zpc");
+ }
+ if (compileFlags & D3DCOMPILE_AVOID_FLOW_CONTROL) {
+ arguments.push_back(L"/Gfa");
+ }
+ if (compileFlags & D3DCOMPILE_PREFER_FLOW_CONTROL) {
+ arguments.push_back(L"/Gfp");
+ }
+ if (compileFlags & D3DCOMPILE_RESOURCES_MAY_ALIAS) {
+ arguments.push_back(L"/res_may_alias");
+ }
+ // Enable FXC backward compatibility by setting the language version to 2016
+ arguments.push_back(L"-HV");
+ arguments.push_back(L"2016");
+ return arguments;
+ }
+
+ } // anonymous namespace
+
// static
ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
const ShaderModuleDescriptor* descriptor) {
@@ -37,6 +91,7 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError ShaderModule::Initialize() {
+ DAWN_TRY(InitializeBase());
const std::vector<uint32_t>& spirv = GetSpirv();
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
@@ -100,7 +155,7 @@ namespace dawn_native { namespace d3d12 {
}
const ModuleBindingInfo& moduleBindingInfo = GetBindingInfo();
- for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
const auto& bindingOffsets = bgl->GetBindingOffsets();
const auto& groupBindingInfo = moduleBindingInfo[group];
@@ -109,6 +164,13 @@ namespace dawn_native { namespace d3d12 {
BindingNumber bindingNumber = it.first;
BindingIndex bindingIndex = bgl->GetBindingIndex(bindingNumber);
+ // Declaring a read-only storage buffer in HLSL but specifying a storage buffer in
+ // the BGL produces the wrong output. Force read-only storage buffer bindings to
+ // be treated as UAV instead of SRV.
+ const bool forceStorageBufferAsUAV =
+ (bindingInfo.type == wgpu::BindingType::ReadonlyStorageBuffer &&
+ bgl->GetBindingInfo(bindingIndex).type == wgpu::BindingType::StorageBuffer);
+
uint32_t bindingOffset = bindingOffsets[bindingIndex];
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
DAWN_TRY(CheckSpvcSuccess(
@@ -116,8 +178,18 @@ namespace dawn_native { namespace d3d12 {
bindingOffset),
"Unable to set decorating binding before generating HLSL shader w/ "
"spvc"));
+ if (forceStorageBufferAsUAV) {
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.SetHLSLForceStorageBufferAsUAV(
+ static_cast<uint32_t>(group), static_cast<uint32_t>(bindingNumber)),
+ "Unable to force read-only storage buffer as UAV w/ spvc"));
+ }
} else {
compiler->set_decoration(bindingInfo.id, spv::DecorationBinding, bindingOffset);
+ if (forceStorageBufferAsUAV) {
+ compiler->set_hlsl_force_storage_buffer_as_uav(
+ static_cast<uint32_t>(group), static_cast<uint32_t>(bindingNumber));
+ }
}
}
}
@@ -134,4 +206,92 @@ namespace dawn_native { namespace d3d12 {
}
}
+ ResultOrError<ComPtr<IDxcBlob>> ShaderModule::CompileShaderDXC(SingleShaderStage stage,
+ const std::string& hlslSource,
+ const char* entryPoint,
+ uint32_t compileFlags) {
+ const wchar_t* targetProfile = nullptr;
+ switch (stage) {
+ case SingleShaderStage::Vertex:
+ targetProfile = L"vs_6_0";
+ break;
+ case SingleShaderStage::Fragment:
+ targetProfile = L"ps_6_0";
+ break;
+ case SingleShaderStage::Compute:
+ targetProfile = L"cs_6_0";
+ break;
+ }
+
+ IDxcLibrary* dxcLibrary;
+ DAWN_TRY_ASSIGN(dxcLibrary, ToBackend(GetDevice())->GetOrCreateDxcLibrary());
+
+ ComPtr<IDxcBlobEncoding> sourceBlob;
+ DAWN_TRY(CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
+ hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
+ "DXC create blob"));
+
+ IDxcCompiler* dxcCompiler;
+ DAWN_TRY_ASSIGN(dxcCompiler, ToBackend(GetDevice())->GetOrCreateDxcCompiler());
+
+ std::wstring entryPointW;
+ DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(entryPoint));
+
+ std::vector<const wchar_t*> arguments = GetDXCArguments(compileFlags);
+
+ ComPtr<IDxcOperationResult> result;
+ DAWN_TRY(CheckHRESULT(
+ dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(), targetProfile,
+ arguments.data(), arguments.size(), nullptr, 0, nullptr, &result),
+ "DXC compile"));
+
+ HRESULT hr;
+ DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
+
+ if (FAILED(hr)) {
+ ComPtr<IDxcBlobEncoding> errors;
+ DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));
+
+ std::string message = std::string("DXC compile failed with ") +
+ static_cast<char*>(errors->GetBufferPointer());
+ return DAWN_INTERNAL_ERROR(message);
+ }
+
+ ComPtr<IDxcBlob> compiledShader;
+ DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
+ return std::move(compiledShader);
+ }
+
+ ResultOrError<ComPtr<ID3DBlob>> ShaderModule::CompileShaderFXC(SingleShaderStage stage,
+ const std::string& hlslSource,
+ const char* entryPoint,
+ uint32_t compileFlags) {
+ const char* targetProfile = nullptr;
+ switch (stage) {
+ case SingleShaderStage::Vertex:
+ targetProfile = "vs_5_1";
+ break;
+ case SingleShaderStage::Fragment:
+ targetProfile = "ps_5_1";
+ break;
+ case SingleShaderStage::Compute:
+ targetProfile = "cs_5_1";
+ break;
+ }
+
+ ComPtr<ID3DBlob> compiledShader;
+ ComPtr<ID3DBlob> errors;
+
+ const PlatformFunctions* functions = ToBackend(GetDevice())->GetFunctions();
+ if (FAILED(functions->d3dCompile(hlslSource.c_str(), hlslSource.length(), nullptr, nullptr,
+ nullptr, entryPoint, targetProfile, compileFlags, 0,
+ &compiledShader, &errors))) {
+ std::string message = std::string("D3D compile failed with ") +
+ static_cast<char*>(errors->GetBufferPointer());
+ return DAWN_INTERNAL_ERROR(message);
+ }
+
+ return std::move(compiledShader);
+ }
+
}} // namespace dawn_native::d3d12
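
The optimization-level mapping in GetDXCArguments relies on the d3dcompiler.h flags overlapping bit-wise: LEVEL2 sets both bits, so masking with it isolates the level before the switch, and LEVEL1 (the default) is zero and adds no argument. Assuming the standard d3dcompiler.h values, the decode looks like:

    #include <cstdint>

    // Per d3dcompiler.h, the optimization-level flags overlap bit-wise,
    // which is why the diff masks with LEVEL2 (both bits set) before
    // switching:
    //   D3DCOMPILE_OPTIMIZATION_LEVEL0 = (1 << 14)
    //   D3DCOMPILE_OPTIMIZATION_LEVEL1 = 0              // default, no flag
    //   D3DCOMPILE_OPTIMIZATION_LEVEL2 = (1 << 14) | (1 << 15)
    //   D3DCOMPILE_OPTIMIZATION_LEVEL3 = (1 << 15)
    const wchar_t* OptimizationFlag(uint32_t compileFlags) {
        switch (compileFlags & ((1u << 14) | (1u << 15))) {
            case (1u << 14):
                return L"/O0";
            case (1u << 14) | (1u << 15):
                return L"/O2";
            case (1u << 15):
                return L"/O3";
            default:
                return nullptr;  // LEVEL1: the default, no argument added
        }
    }
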
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
index e34d8815a47..c64e8ce6610 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
@@ -17,6 +17,8 @@
#include "dawn_native/ShaderModule.h"
+#include "dawn_native/d3d12/d3d12_platform.h"
+
namespace dawn_native { namespace d3d12 {
class Device;
@@ -29,6 +31,15 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<std::string> GetHLSLSource(PipelineLayout* layout);
+ ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(SingleShaderStage stage,
+ const std::string& hlslSource,
+ const char* entryPoint,
+ uint32_t compileFlags);
+ ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(SingleShaderStage stage,
+ const std::string& hlslSource,
+ const char* entryPoint,
+ uint32_t compileFlags);
+
private:
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
~ShaderModule() override = default;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
index 9c36ac5030c..9039d8ab9d7 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
@@ -16,6 +16,7 @@
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -77,7 +78,7 @@ namespace dawn_native { namespace d3d12 {
return false;
}
- ID3D12DescriptorHeap* descriptorHeap = mHeap.Get();
+ ID3D12DescriptorHeap* descriptorHeap = mHeap->GetD3D12DescriptorHeap();
const uint64_t heapOffset = mSizeIncrement * startOffset;
@@ -99,7 +100,7 @@ namespace dawn_native { namespace d3d12 {
}
ID3D12DescriptorHeap* ShaderVisibleDescriptorAllocator::GetShaderVisibleHeap() const {
- return mHeap.Get();
+ return mHeap->GetD3D12DescriptorHeap();
}
void ShaderVisibleDescriptorAllocator::Tick(uint64_t completedSerial) {
@@ -108,18 +109,19 @@ namespace dawn_native { namespace d3d12 {
// Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeap() {
- ComPtr<ID3D12DescriptorHeap> heap;
+ std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap;
// Return the switched out heap to the pool and retrieve the oldest heap that is no longer
// used by GPU. This maintains a heap buffer to avoid frequently re-creating heaps for heavy
// users.
        // TODO(dawn:256): Consider periodically trimming to avoid OOM.
if (mHeap != nullptr) {
+ mDevice->GetResidencyManager()->UnlockAllocation(mHeap.get());
mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
}
// Recycle existing heap if possible.
if (!mPool.empty() && mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
- heap = std::move(mPool.front().heap);
+ descriptorHeap = std::move(mPool.front().heap);
mPool.pop_front();
}
@@ -129,19 +131,35 @@ namespace dawn_native { namespace d3d12 {
const uint32_t descriptorCount = GetD3D12ShaderVisibleHeapSize(
mHeapType, mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
- if (heap == nullptr) {
+ if (descriptorHeap == nullptr) {
+ // The size in bytes of a descriptor heap is best calculated by the increment size
+ // multiplied by the number of descriptors. In practice, this is only an estimate and
+ // the actual size may vary depending on the driver.
+ const uint64_t kSize = mSizeIncrement * descriptorCount;
+
+ DAWN_TRY(
+ mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
+
+ ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
heapDescriptor.Type = mHeapType;
heapDescriptor.NumDescriptors = descriptorCount;
heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
heapDescriptor.NodeMask = 0;
- DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
- &heapDescriptor, IID_PPV_ARGS(&heap)),
- "ID3D12Device::CreateDescriptorHeap"));
+ DAWN_TRY(
+ CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
+ &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
+ "ID3D12Device::CreateDescriptorHeap"));
+ descriptorHeap = std::make_unique<ShaderVisibleDescriptorHeap>(
+ std::move(d3d12DescriptorHeap), kSize);
+ // We must track the allocation in the LRU when it is created, otherwise the residency
+ // manager will see the allocation as non-resident in the later call to LockAllocation.
+ mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
}
+ DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(descriptorHeap.get()));
// Create a FIFO buffer from the recently created heap.
- mHeap = std::move(heap);
+ mHeap = std::move(descriptorHeap);
mAllocator = RingBufferAllocator(descriptorCount);
// Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
@@ -164,6 +182,15 @@ namespace dawn_native { namespace d3d12 {
return mPool.size();
}
+ bool ShaderVisibleDescriptorAllocator::IsShaderVisibleHeapLockedResidentForTesting() const {
+ return mHeap->IsResidencyLocked();
+ }
+
+ bool ShaderVisibleDescriptorAllocator::IsLastShaderVisibleHeapInLRUForTesting() const {
+ ASSERT(!mPool.empty());
+ return mPool.back().heap->IsInResidencyLRUCache();
+ }
+
bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(
const GPUDescriptorHeapAllocation& allocation) const {
// Consider valid if allocated for the pending submit and the shader visible heaps
@@ -171,4 +198,15 @@ namespace dawn_native { namespace d3d12 {
return (allocation.GetLastUsageSerial() > mDevice->GetCompletedCommandSerial() &&
allocation.GetHeapSerial() == mHeapSerial);
}
+
+ ShaderVisibleDescriptorHeap::ShaderVisibleDescriptorHeap(
+ ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
+ uint64_t size)
+ : Pageable(d3d12DescriptorHeap, MemorySegment::Local, size),
+ mD3d12DescriptorHeap(std::move(d3d12DescriptorHeap)) {
+ }
+
+ ID3D12DescriptorHeap* ShaderVisibleDescriptorHeap::GetD3D12DescriptorHeap() const {
+ return mD3d12DescriptorHeap.Get();
+ }
    }}  // namespace dawn_native::d3d12
\ No newline at end of file
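
The heap pool above is serial-gated: the active heap is retired tagged with the pending command serial, and a pooled heap is only reused once the GPU's completed serial has passed that tag. A sketch of just that recycling decision (Heap and the serial parameters are illustrative stand-ins):

    #include <cstdint>
    #include <list>
    #include <memory>

    // Sketch: retire the active heap with the pending serial, and only reuse
    // the oldest pooled heap once the GPU has completed that serial.
    struct Heap {};
    struct PooledHeap {
        uint64_t retiredSerial;
        std::unique_ptr<Heap> heap;
    };

    std::unique_ptr<Heap> SwitchHeap(std::list<PooledHeap>& pool,
                                     std::unique_ptr<Heap> active,
                                     uint64_t pendingSerial,
                                     uint64_t completedSerial) {
        if (active != nullptr) {
            pool.push_back({pendingSerial, std::move(active)});
        }
        if (!pool.empty() && pool.front().retiredSerial <= completedSerial) {
            std::unique_ptr<Heap> recycled = std::move(pool.front().heap);
            pool.pop_front();
            return recycled;  // Oldest heap is safe to reuse.
        }
        return nullptr;  // Caller must create a fresh heap.
    }
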
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
index be4e83974d2..aec20a3b449 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
@@ -17,6 +17,7 @@
#include "dawn_native/Error.h"
#include "dawn_native/RingBufferAllocator.h"
+#include "dawn_native/d3d12/PageableD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
#include <list>
@@ -32,6 +33,16 @@ namespace dawn_native { namespace d3d12 {
class Device;
class GPUDescriptorHeapAllocation;
+ class ShaderVisibleDescriptorHeap : public Pageable {
+ public:
+ ShaderVisibleDescriptorHeap(ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
+ uint64_t size);
+ ID3D12DescriptorHeap* GetD3D12DescriptorHeap() const;
+
+ private:
+ ComPtr<ID3D12DescriptorHeap> mD3d12DescriptorHeap;
+ };
+
class ShaderVisibleDescriptorAllocator {
public:
static ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>> Create(
@@ -56,16 +67,18 @@ namespace dawn_native { namespace d3d12 {
Serial GetShaderVisibleHeapSerialForTesting() const;
uint64_t GetShaderVisibleHeapSizeForTesting() const;
uint64_t GetShaderVisiblePoolSizeForTesting() const;
+ bool IsShaderVisibleHeapLockedResidentForTesting() const;
+ bool IsLastShaderVisibleHeapInLRUForTesting() const;
bool IsAllocationStillValid(const GPUDescriptorHeapAllocation& allocation) const;
private:
struct SerialDescriptorHeap {
Serial heapSerial;
- ComPtr<ID3D12DescriptorHeap> heap;
+ std::unique_ptr<ShaderVisibleDescriptorHeap> heap;
};
- ComPtr<ID3D12DescriptorHeap> mHeap;
+ std::unique_ptr<ShaderVisibleDescriptorHeap> mHeap;
RingBufferAllocator mAllocator;
std::list<SerialDescriptorHeap> mPool;
D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
index 7df5a670ea7..b3aec3f1d9d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
@@ -44,8 +44,8 @@ namespace dawn_native { namespace d3d12 {
// The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
// evicted. This buffer should already have been made resident when it was created.
- DAWN_TRY(
- mDevice->GetResidencyManager()->LockHeap(ToBackend(mUploadHeap.GetResourceHeap())));
+ DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(
+ ToBackend(mUploadHeap.GetResourceHeap())));
return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
}
@@ -59,7 +59,7 @@ namespace dawn_native { namespace d3d12 {
// The underlying heap was locked in residency upon creation. We must unlock it when this
// buffer becomes unmapped.
- mDevice->GetResidencyManager()->UnlockHeap(ToBackend(mUploadHeap.GetResourceHeap()));
+ mDevice->GetResidencyManager()->UnlockAllocation(ToBackend(mUploadHeap.GetResourceHeap()));
// Invalidate the CPU virtual address & flush cache (if needed).
GetResource()->Unmap(0, nullptr);
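
Map and Unmap bracket a residency lock so a CPU-mapped buffer can never be evicted mid-access. The pairing, as a hypothetical RAII wrapper (not Dawn code; the real LockAllocation also returns a MaybeError that must be handled):

    // Sketch: RAII guard that pins an allocation for the lifetime of a CPU
    // mapping. Manager, Alloc, and the Lock/Unlock calls are hypothetical.
    template <typename Manager, typename Alloc>
    class MappedLock {
      public:
        MappedLock(Manager* manager, Alloc* alloc) : mManager(manager), mAlloc(alloc) {
            mManager->LockAllocation(mAlloc);  // pin before ID3D12Resource::Map
        }
        ~MappedLock() {
            mManager->UnlockAllocation(mAlloc);  // unpin after Unmap
        }
        MappedLock(const MappedLock&) = delete;
        MappedLock& operator=(const MappedLock&) = delete;

      private:
        Manager* mManager;
        Alloc* mAlloc;
    };
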
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
index 45ec25bb979..74798059dfd 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
@@ -48,14 +48,16 @@ namespace dawn_native { namespace d3d12 {
return new Texture(ToBackend(GetDevice()), descriptor, std::move(d3d12Texture));
}
- MaybeError SwapChain::OnBeforePresent(TextureBase* texture) {
+ MaybeError SwapChain::OnBeforePresent(TextureViewBase* view) {
Device* device = ToBackend(GetDevice());
CommandRecordingContext* commandContext;
DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
// Perform the necessary transition for the texture to be presented.
- ToBackend(texture)->TrackUsageAndTransitionNow(commandContext, mTextureUsage);
+ ToBackend(view->GetTexture())
+ ->TrackUsageAndTransitionNow(commandContext, mTextureUsage,
+ view->GetSubresourceRange());
DAWN_TRY(device->ExecutePendingCommandContext());
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
index ee92e09cbf6..6938e20adad 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
@@ -28,7 +28,7 @@ namespace dawn_native { namespace d3d12 {
protected:
~SwapChain() override;
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureBase* texture) override;
+ MaybeError OnBeforePresent(TextureViewBase* view) override;
wgpu::TextureUsage mTextureUsage;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
index cfa1d1eec42..f2a8388220e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
@@ -25,6 +25,8 @@ namespace dawn_native { namespace d3d12 {
uint32_t offset,
uint32_t bytesPerRow,
uint32_t slicePitch) {
+ ASSERT(bytesPerRow != 0);
+ ASSERT(slicePitch != 0);
uint32_t byteOffsetX = offset % bytesPerRow;
offset -= byteOffsetX;
uint32_t byteOffsetY = offset % slicePitch;
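
The two new ASSERTs guard the modulo arithmetic that follows: the splitter decomposes a linear buffer offset into an intra-row component, a row component within a slice, and a slice-aligned remainder, and a zero pitch would make the modulo undefined. A sketch of that decomposition (the struct and names are illustrative):

    #include <cassert>
    #include <cstdint>

    struct DecomposedOffset {
        uint32_t byteOffsetX;    // bytes into the current row
        uint32_t byteOffsetY;    // bytes of whole rows into the current slice
        uint32_t alignedOffset;  // remaining slice-aligned bytes
    };

    DecomposedOffset Decompose(uint32_t offset, uint32_t bytesPerRow, uint32_t slicePitch) {
        assert(bytesPerRow != 0 && slicePitch != 0);  // what the new ASSERTs enforce
        DecomposedOffset d;
        d.byteOffsetX = offset % bytesPerRow;
        offset -= d.byteOffsetX;
        d.byteOffsetY = offset % slicePitch;
        d.alignedOffset = offset - d.byteOffsetY;
        return d;
    }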
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
index d6d1e3a12b4..7b86367c62c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
@@ -336,8 +336,8 @@ namespace dawn_native { namespace d3d12 {
return DAWN_VALIDATION_ERROR("Mip level count must be 1");
}
- if (descriptor->arrayLayerCount != 1) {
- return DAWN_VALIDATION_ERROR("Array layer count must be 1");
+ if (descriptor->size.depth != 1) {
+ return DAWN_VALIDATION_ERROR("Depth must be 1");
}
if (descriptor->sampleCount != 1) {
@@ -393,14 +393,18 @@ namespace dawn_native { namespace d3d12 {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+ // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
+ // GPUTextureDescriptor.size.depth is done.
+ TextureDescriptor fixedDescriptor;
+ DAWN_TRY_ASSIGN(fixedDescriptor, FixTextureDescriptor(device, textureDescriptor));
+ textureDescriptor = &fixedDescriptor;
+
Ref<Texture> dawnTexture =
AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedExternal));
DAWN_TRY(dawnTexture->InitializeAsExternalTexture(textureDescriptor, sharedHandle,
acquireMutexKey, isSwapChainTexture));
-
- dawnTexture->SetIsSubresourceContentInitialized(descriptor->isCleared, 0,
- textureDescriptor->mipLevelCount, 0,
- textureDescriptor->arrayLayerCount);
+ dawnTexture->SetIsSubresourceContentInitialized(descriptor->isCleared,
+ dawnTexture->GetAllSubresources());
return std::move(dawnTexture);
}
@@ -448,6 +452,7 @@ namespace dawn_native { namespace d3d12 {
const Extent3D& size = GetSize();
resourceDescriptor.Width = size.width;
resourceDescriptor.Height = size.height;
+ resourceDescriptor.DepthOrArraySize = size.depth;
// This will need to be much more nuanced when WebGPU has
// texture view compatibility rules.
@@ -458,7 +463,6 @@ namespace dawn_native { namespace d3d12 {
? D3D12TypelessTextureFormat(GetFormat().format)
: D3D12TextureFormat(GetFormat().format);
- resourceDescriptor.DepthOrArraySize = GetDepthOrArraySize();
resourceDescriptor.MipLevels = static_cast<UINT16>(GetNumMipLevels());
resourceDescriptor.Format = dxgiFormat;
resourceDescriptor.SampleDesc.Count = GetSampleCount();
@@ -479,17 +483,24 @@ namespace dawn_native { namespace d3d12 {
CommandRecordingContext* commandContext;
DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
- DAWN_TRY(ClearTexture(commandContext, 0, GetNumMipLevels(), 0, GetArrayLayers(),
+ DAWN_TRY(ClearTexture(commandContext, GetAllSubresources(),
TextureBase::ClearValue::NonZero));
}
return {};
}
+ Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
+ : TextureBase(device, descriptor, state),
+ mSubresourceStateAndDecay(
+ GetSubresourceCount(),
+ {D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON, UINT64_MAX, false}) {
+ }
+
Texture::Texture(Device* device,
const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> nativeTexture)
- : TextureBase(device, descriptor, TextureState::OwnedExternal) {
+ : Texture(device, descriptor, TextureState::OwnedExternal) {
AllocationInfo info;
info.mMethod = AllocationMethod::kExternal;
// When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
@@ -497,8 +508,7 @@ namespace dawn_native { namespace d3d12 {
// memory management.
mResourceAllocation = {info, 0, std::move(nativeTexture), nullptr};
- SetIsSubresourceContentInitialized(true, 0, descriptor->mipLevelCount, 0,
- descriptor->arrayLayerCount);
+ SetIsSubresourceContentInitialized(true, GetAllSubresources());
}
Texture::~Texture() {
@@ -537,135 +547,224 @@ namespace dawn_native { namespace d3d12 {
return mResourceAllocation.GetD3D12Resource().Get();
}
- UINT16 Texture::GetDepthOrArraySize() {
- switch (GetDimension()) {
- case wgpu::TextureDimension::e2D:
- return static_cast<UINT16>(GetArrayLayers());
- default:
- UNREACHABLE();
- }
+ void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()), range);
}
- // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
- // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
- // cause subsequent errors.
- bool Texture::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::TextureUsage newUsage) {
- return TrackUsageAndGetResourceBarrier(commandContext, barrier,
- D3D12TextureUsage(newUsage, GetFormat()));
+ void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage) {
+ TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()),
+ GetAllSubresources());
}
- // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
- // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
- // cause subsequent errors.
- bool Texture::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- D3D12_RESOURCE_STATES newState) {
+ void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState) {
+ TrackUsageAndTransitionNow(commandContext, newState, GetAllSubresources());
+ }
+
+ void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState,
+ const SubresourceRange& range) {
if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
// Track the underlying heap to ensure residency.
Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
}
- // Return the resource barrier.
- return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newState);
- }
-
- void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage) {
- D3D12_RESOURCE_BARRIER barrier;
-
- if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, usage)) {
- commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
- }
- }
-
- void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState) {
- D3D12_RESOURCE_BARRIER barrier;
+ std::vector<D3D12_RESOURCE_BARRIER> barriers;
+ barriers.reserve(range.levelCount * range.layerCount);
- if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newState)) {
- commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
+ TransitionUsageAndGetResourceBarrier(commandContext, &barriers, newState, range);
+ if (barriers.size()) {
+ commandContext->GetCommandList()->ResourceBarrier(barriers.size(), barriers.data());
}
}
- // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
- // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
- // cause subsequent errors.
- bool Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- D3D12_RESOURCE_STATES newState) {
- // Textures with keyed mutexes can be written from other graphics queues. Hence, they
- // must be acquired before command list submission to ensure work from the other queues
- // has finished. See Device::ExecuteCommandContext.
- if (mDxgiKeyedMutex != nullptr) {
- commandContext->AddToSharedTextureList(this);
- }
-
- // Avoid transitioning the texture when it isn't needed.
+ void Texture::TransitionSingleOrAllSubresources(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+ uint32_t index,
+ D3D12_RESOURCE_STATES newState,
+ const Serial pendingCommandSerial,
+ bool allSubresources) {
+ StateAndDecay* state = &mSubresourceStateAndDecay[index];
+ // Reuse the subresource(s) directly and skip the transition when it isn't needed.
// TODO(cwallez@chromium.org): Need some form of UAV barriers at some point.
- if (mLastState == newState) {
- return false;
+ if (state->lastState == newState) {
+ return;
}
- D3D12_RESOURCE_STATES lastState = mLastState;
-
- // The COMMON state represents a state where no write operations can be pending, and where
- // all pixels are uncompressed. This makes it possible to transition to and from some states
- // without synchronization (i.e. without an explicit ResourceBarrier call). Textures can be
- // implicitly promoted to 1) a single write state, or 2) multiple read states. Textures will
- // implicitly decay to the COMMON state when all of the following are true: 1) the texture
- // is accessed on a command list, 2) the ExecuteCommandLists call that uses that command
- // list has ended, and 3) the texture was promoted implicitly to a read-only state and is
- // still in that state.
+ D3D12_RESOURCE_STATES lastState = state->lastState;
+
+ // The COMMON state represents a state where no write operations can be pending, and
+ // where all pixels are uncompressed. This makes it possible to transition to and
+ // from some states without synchronization (i.e. without an explicit
+ // ResourceBarrier call). Textures can be implicitly promoted to 1) a single write
+ // state, or 2) multiple read states. Textures will implicitly decay to the COMMON
+ // state when all of the following are true: 1) the texture is accessed on a command
+ // list, 2) the ExecuteCommandLists call that uses that command list has ended, and
+ // 3) the texture was promoted implicitly to a read-only state and is still in that
+ // state.
// https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
- // To track implicit decays, we must record the pending serial on which that transition will
- // occur. When that texture is used again, the previously recorded serial must be compared
- // to the last completed serial to determine if the texture has implicity decayed to the
- // common state.
- const Serial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
- if (mValidToDecay && pendingCommandSerial > mLastUsedSerial) {
+ // To track implicit decays, we must record the pending serial on which that
+ // transition will occur. When that texture is used again, the previously recorded
+ // serial must be compared to the last completed serial to determine if the texture
+ // has implicitly decayed to the common state.
+ if (state->isValidToDecay && pendingCommandSerial > state->lastDecaySerial) {
lastState = D3D12_RESOURCE_STATE_COMMON;
}
// Update the tracked state.
- mLastState = newState;
+ state->lastState = newState;
- // Destination states that qualify for an implicit promotion for a non-simultaneous-access
- // texture: NON_PIXEL_SHADER_RESOURCE, PIXEL_SHADER_RESOURCE, COPY_SRC, COPY_DEST.
+ // Destination states that qualify for an implicit promotion for a
+ // non-simultaneous-access texture: NON_PIXEL_SHADER_RESOURCE,
+ // PIXEL_SHADER_RESOURCE, COPY_SRC, COPY_DEST.
{
- static constexpr D3D12_RESOURCE_STATES kD3D12TextureReadOnlyStates =
+ static constexpr D3D12_RESOURCE_STATES kD3D12PromotableReadOnlyStates =
D3D12_RESOURCE_STATE_COPY_SOURCE | D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
if (lastState == D3D12_RESOURCE_STATE_COMMON) {
- if (newState == (newState & kD3D12TextureReadOnlyStates)) {
+ if (newState == (newState & kD3D12PromotableReadOnlyStates)) {
// Implicit texture state decays can only occur when the texture was implicitly
- // transitioned to a read-only state. mValidToDecay is needed to differentiate
+ // transitioned to a read-only state. isValidToDecay is needed to differentiate
// between resources that were implicitly or explicitly transitioned to a
// read-only state.
- mValidToDecay = true;
- mLastUsedSerial = pendingCommandSerial;
- return false;
+ state->isValidToDecay = true;
+ state->lastDecaySerial = pendingCommandSerial;
+ return;
} else if (newState == D3D12_RESOURCE_STATE_COPY_DEST) {
- mValidToDecay = false;
- return false;
+ state->isValidToDecay = false;
+ return;
}
}
}
- barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
- barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier->Transition.pResource = GetD3D12Resource();
- barrier->Transition.StateBefore = lastState;
- barrier->Transition.StateAfter = newState;
- barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
+ D3D12_RESOURCE_BARRIER barrier;
+ barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+ barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+ barrier.Transition.pResource = GetD3D12Resource();
+ barrier.Transition.StateBefore = lastState;
+ barrier.Transition.StateAfter = newState;
+ barrier.Transition.Subresource =
+ allSubresources ? D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES : index;
+ barriers->push_back(barrier);
+ // TODO(yunchao.he@intel.com): support subresources for depth/stencil. Depth/stencil
+ // textures have separate plane slices, while the current implementation only
+ // distinguishes mip slices and array slices for subresources.
+ // This is a hack because Dawn doesn't handle subresources of multiplanar resources
+ // correctly. We force the transition to be the same for all planes to match what the
+ // frontend validation checks for. This hack might be incorrect for stencil-only
+ // textures because we always set the transition barrier for the depth plane.
+ if (!allSubresources && newState == D3D12_RESOURCE_STATE_DEPTH_WRITE &&
+ GetFormat().HasStencil()) {
+ D3D12_RESOURCE_BARRIER barrierStencil = barrier;
+ barrierStencil.Transition.Subresource += GetArrayLayers() * GetNumMipLevels();
+ barriers->push_back(barrierStencil);
+ }
- mValidToDecay = false;
+ state->isValidToDecay = false;
+ }
- return true;
+ void Texture::HandleTransitionSpecialCases(CommandRecordingContext* commandContext) {
+ // Textures with keyed mutexes can be written from other graphics queues. Hence, they
+ // must be acquired before command list submission to ensure work from the other queues
+ // has finished. See Device::ExecuteCommandContext.
+ if (mDxgiKeyedMutex != nullptr) {
+ commandContext->AddToSharedTextureList(this);
+ }
+ }
+
+ void Texture::TransitionUsageAndGetResourceBarrier(
+ CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+ D3D12_RESOURCE_STATES newState,
+ const SubresourceRange& range) {
+ HandleTransitionSpecialCases(commandContext);
+
+ const Serial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
+ uint32_t subresourceCount = GetSubresourceCount();
+
+ // These transitions assume the texture is 2D.
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+
+ // If the transition covers all subresources and the old usages of all subresources
+ // were the same, we can use a single barrier to transition every subresource.
+ // Note that a texture with only one mip level and one array slice always falls into
+ // this category.
+ bool areAllSubresourcesCovered = range.levelCount * range.layerCount == subresourceCount;
+ if (mSameLastUsagesAcrossSubresources && areAllSubresourcesCovered) {
+ TransitionSingleOrAllSubresources(barriers, 0, newState, pendingCommandSerial, true);
+
+ // TODO(yunchao.he@intel.com): compress and decompress if all subresources have the
+ // same state. We may only need to retain mSubresourceStateAndDecay[0].
+ for (uint32_t i = 1; i < subresourceCount; ++i) {
+ mSubresourceStateAndDecay[i] = mSubresourceStateAndDecay[0];
+ }
+
+ return;
+ }
+ for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
+ uint32_t index = GetSubresourceIndex(range.baseMipLevel + mipLevel,
+ range.baseArrayLayer + arrayLayer);
+
+ TransitionSingleOrAllSubresources(barriers, index, newState, pendingCommandSerial,
+ false);
+ }
+ }
+ mSameLastUsagesAcrossSubresources = areAllSubresourcesCovered;
+ }
+
+ void Texture::TrackUsageAndGetResourceBarrierForPass(
+ CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+ const PassTextureUsage& textureUsages) {
+ HandleTransitionSpecialCases(commandContext);
+
+ const Serial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
+ uint32_t subresourceCount = GetSubresourceCount();
+ ASSERT(textureUsages.subresourceUsages.size() == subresourceCount);
+ // These transitions assume the texture is 2D.
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+
+ // If the new usages of all subresources are the same and their old usages were also
+ // the same, we can use a single barrier to transition every subresource.
+ // Note that a texture with only one mip level and one array slice always falls into
+ // this category.
+ if (textureUsages.sameUsagesAcrossSubresources && mSameLastUsagesAcrossSubresources) {
+ D3D12_RESOURCE_STATES newState = D3D12TextureUsage(textureUsages.usage, GetFormat());
+ TransitionSingleOrAllSubresources(barriers, 0, newState, pendingCommandSerial, true);
+
+ // TODO(yunchao.he@intel.com): compress and decompress if all subresources have the
+ // same state. We may only need to retain mSubresourceStateAndDecay[0].
+ for (uint32_t i = 1; i < subresourceCount; ++i) {
+ mSubresourceStateAndDecay[i] = mSubresourceStateAndDecay[0];
+ }
+
+ return;
+ }
+
+ for (uint32_t arrayLayer = 0; arrayLayer < GetArrayLayers(); ++arrayLayer) {
+ for (uint32_t mipLevel = 0; mipLevel < GetNumMipLevels(); ++mipLevel) {
+ uint32_t index = GetSubresourceIndex(mipLevel, arrayLayer);
+
+ // Skip if this subresource is not used during the current pass
+ if (textureUsages.subresourceUsages[index] == wgpu::TextureUsage::None) {
+ continue;
+ }
+
+ D3D12_RESOURCE_STATES newState =
+ D3D12TextureUsage(textureUsages.subresourceUsages[index], GetFormat());
+
+ TransitionSingleOrAllSubresources(barriers, index, newState, pendingCommandSerial,
+ false);
+ }
+ }
+ mSameLastUsagesAcrossSubresources = textureUsages.sameUsagesAcrossSubresources;
}
D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(uint32_t mipLevel,
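
The per-subresource loops above rely on D3D12's standard subresource numbering: mip levels vary fastest, then array slices, then planes. That is also why the stencil workaround earlier in this hunk offsets the barrier by GetArrayLayers() * GetNumMipLevels() — for a depth/stencil format the stencil plane occupies the next full block of indices. A sketch of the indexing, mirroring D3D12CalcSubresource from d3dx12.h:

    #include <cstdint>

    // D3D12 subresource index: mips vary fastest, then array slices, then planes.
    // For depth/stencil formats plane 0 is depth and plane 1 is stencil, so the
    // stencil copy of a subresource lives mipLevels * arraySize indices after
    // the depth copy.
    uint32_t CalcSubresource(uint32_t mipSlice,
                             uint32_t arraySlice,
                             uint32_t planeSlice,
                             uint32_t mipLevels,
                             uint32_t arraySize) {
        return mipSlice + arraySlice * mipLevels + planeSlice * mipLevels * arraySize;
    }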
@@ -720,15 +819,11 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
+ const SubresourceRange& range,
TextureBase::ClearValue clearValue) {
// TODO(jiawei.shao@intel.com): initialize the textures in compressed formats with copies.
if (GetFormat().isCompressed) {
- SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
- layerCount);
+ SetIsSubresourceContentInitialized(true, range);
return {};
}
@@ -741,15 +836,17 @@ namespace dawn_native { namespace d3d12 {
if (GetFormat().isRenderable) {
if (GetFormat().HasDepthOrStencil()) {
- TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE);
+ TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE, range);
D3D12_CLEAR_FLAGS clearFlags = {};
- for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
- for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
- ++layer) {
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, layer))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -775,16 +872,19 @@ namespace dawn_native { namespace d3d12 {
}
}
} else {
- TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET);
+ TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET,
+ range);
const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor,
fClearColor};
- for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
- for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
- ++layer) {
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, layer))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -805,9 +905,9 @@ namespace dawn_native { namespace d3d12 {
// TODO(natlee@microsoft.com): test compressed textures are cleared
// create temp buffer with clear color to copy to the texture image
uint32_t bytesPerRow =
- Align((GetSize().width / GetFormat().blockWidth) * GetFormat().blockByteSize,
+ Align((GetWidth() / GetFormat().blockWidth) * GetFormat().blockByteSize,
kTextureBytesPerRowAlignment);
- uint64_t bufferSize64 = bytesPerRow * (GetSize().height / GetFormat().blockHeight);
+ uint64_t bufferSize64 = bytesPerRow * (GetHeight() / GetFormat().blockHeight);
if (bufferSize64 > std::numeric_limits<uint32_t>::max()) {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
}
@@ -818,18 +918,23 @@ namespace dawn_native { namespace d3d12 {
uploader->Allocate(bufferSize, device->GetPendingCommandSerial()));
memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
- TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST);
+ TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST, range);
- for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
// compute d3d12 texture copy locations for texture and buffer
Extent3D copySize = GetMipLevelVirtualSize(level);
- TextureCopySplit copySplit = ComputeTextureCopySplit(
- {0, 0, 0}, copySize, GetFormat(), uploadHandle.startOffset, bytesPerRow, 0);
- for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
- ++layer) {
+ uint32_t rowsPerImage = GetHeight();
+ TextureCopySplit copySplit =
+ ComputeTextureCopySplit({0, 0, 0}, copySize, GetFormat(),
+ uploadHandle.startOffset, bytesPerRow, rowsPerImage);
+
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, layer))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -855,28 +960,22 @@ namespace dawn_native { namespace d3d12 {
}
}
if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
- layerCount);
+ SetIsSubresourceContentInitialized(true, range);
GetDevice()->IncrementLazyClearCountForTesting();
}
return {};
}
void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount) {
+ const SubresourceRange& range) {
if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
return;
}
- if (!IsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
- layerCount)) {
+ if (!IsSubresourceContentInitialized(range)) {
// If subresource has not been initialized, clear it to black as it could contain
// dirty bits from recycled memory
- GetDevice()->ConsumedError(ClearTexture(commandContext, baseMipLevel, levelCount,
- baseArrayLayer, layerCount,
- TextureBase::ClearValue::Zero));
+ GetDevice()->ConsumedError(
+ ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
}
}
@@ -893,35 +992,50 @@ namespace dawn_native { namespace d3d12 {
// Currently we always use D3D12_TEX2D_ARRAY_SRV because we cannot specify base array layer
// and layer count in D3D12_TEX2D_SRV. For 2D texture views, we treat them as 1-layer 2D
// array textures.
+ // Multisampled textures may only have one array layer, so we use
+ // D3D12_SRV_DIMENSION_TEXTURE2DMS.
// https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_srv
// https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array_srv
// TODO(jiawei.shao@intel.com): support more texture view dimensions.
- // TODO(jiawei.shao@intel.com): support creating SRV on multisampled textures.
- switch (descriptor->dimension) {
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
- mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
- mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
- mSrvDesc.Texture2DArray.MipLevels = descriptor->mipLevelCount;
- mSrvDesc.Texture2DArray.MostDetailedMip = descriptor->baseMipLevel;
- mSrvDesc.Texture2DArray.PlaneSlice = 0;
- mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
- break;
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- ASSERT(descriptor->arrayLayerCount % 6 == 0);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
- mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
- mSrvDesc.TextureCubeArray.NumCubes = descriptor->arrayLayerCount / 6;
- mSrvDesc.TextureCubeArray.MostDetailedMip = descriptor->baseMipLevel;
- mSrvDesc.TextureCubeArray.MipLevels = descriptor->mipLevelCount;
- mSrvDesc.TextureCubeArray.ResourceMinLODClamp = 0;
- break;
- default:
- UNREACHABLE();
+ if (GetTexture()->IsMultisampledTexture()) {
+ switch (descriptor->dimension) {
+ case wgpu::TextureViewDimension::e2DArray:
+ ASSERT(texture->GetArrayLayers() == 1);
+ DAWN_FALLTHROUGH;
+ case wgpu::TextureViewDimension::e2D:
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DMS;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ switch (descriptor->dimension) {
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
+ mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
+ mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
+ mSrvDesc.Texture2DArray.MipLevels = descriptor->mipLevelCount;
+ mSrvDesc.Texture2DArray.MostDetailedMip = descriptor->baseMipLevel;
+ mSrvDesc.Texture2DArray.PlaneSlice = 0;
+ mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
+ break;
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(descriptor->arrayLayerCount % 6 == 0);
+ mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
+ mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
+ mSrvDesc.TextureCubeArray.NumCubes = descriptor->arrayLayerCount / 6;
+ mSrvDesc.TextureCubeArray.MostDetailedMip = descriptor->baseMipLevel;
+ mSrvDesc.TextureCubeArray.MipLevels = descriptor->mipLevelCount;
+ mSrvDesc.TextureCubeArray.ResourceMinLODClamp = 0;
+ break;
+ default:
+ UNREACHABLE();
+ }
}
}
@@ -939,11 +1053,9 @@ namespace dawn_native { namespace d3d12 {
}
D3D12_DEPTH_STENCIL_VIEW_DESC TextureView::GetDSVDescriptor() const {
- // TODO(jiawei.shao@intel.com): support rendering into a layer of a texture.
ASSERT(GetLevelCount() == 1);
- uint32_t mipLevel = GetBaseMipLevel();
return ToBackend(GetTexture())
- ->GetDSVDescriptor(mipLevel, GetBaseArrayLayer(), GetLayerCount());
+ ->GetDSVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount());
}
D3D12_UNORDERED_ACCESS_VIEW_DESC TextureView::GetUAVDescriptor() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
index 45443c11de1..ef17bf40628 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
@@ -19,6 +19,7 @@
#include "dawn_native/Texture.h"
#include "dawn_native/DawnNative.h"
+#include "dawn_native/PassResourceUsage.h"
#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
@@ -55,20 +56,24 @@ namespace dawn_native { namespace d3d12 {
uint32_t baseArrayLayer,
uint32_t layerCount) const;
void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount);
-
- bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::TextureUsage newUsage);
+ const SubresourceRange& range);
+
+ void TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+ const PassTextureUsage& textureUsages);
void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage);
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range);
void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState);
+ D3D12_RESOURCE_STATES newState,
+ const SubresourceRange& range);
+ void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage);
+ void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState);
private:
+ Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
~Texture() override;
using TextureBase::TextureBase;
@@ -81,26 +86,31 @@ namespace dawn_native { namespace d3d12 {
// Dawn API
void DestroyImpl() override;
MaybeError ClearTexture(CommandRecordingContext* commandContext,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
+ const SubresourceRange& range,
TextureBase::ClearValue clearValue);
- UINT16 GetDepthOrArraySize();
+ void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+ D3D12_RESOURCE_STATES newState,
+ const SubresourceRange& range);
- bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- D3D12_RESOURCE_STATES newState);
- bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- D3D12_RESOURCE_STATES newState);
+ void TransitionSingleOrAllSubresources(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+ uint32_t index,
+ D3D12_RESOURCE_STATES subresourceNewState,
+ const Serial pendingCommandSerial,
+ bool allSubresources);
+ void HandleTransitionSpecialCases(CommandRecordingContext* commandContext);
- ResourceHeapAllocation mResourceAllocation;
- D3D12_RESOURCE_STATES mLastState = D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON;
+ bool mSameLastUsagesAcrossSubresources = true;
- Serial mLastUsedSerial = UINT64_MAX;
- bool mValidToDecay = false;
+ struct StateAndDecay {
+ D3D12_RESOURCE_STATES lastState;
+ Serial lastDecaySerial;
+ bool isValidToDecay;
+ };
+ std::vector<StateAndDecay> mSubresourceStateAndDecay;
+
+ ResourceHeapAllocation mResourceAllocation;
bool mSwapChainTexture = false;
Serial mAcquireMutexKey = 0;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
index 04e6669f0c2..d8c20ef1613 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
@@ -16,8 +16,29 @@
#include "common/Assert.h"
+#include <stringapiset.h>
+
namespace dawn_native { namespace d3d12 {
+ ResultOrError<std::wstring> ConvertStringToWstring(const char* str) {
+ size_t len = strlen(str);
+ if (len == 0) {
+ return std::wstring();
+ }
+ int numChars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, nullptr, 0);
+ if (numChars == 0) {
+ return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
+ }
+ std::wstring result;
+ result.resize(numChars);
+ int numConvertedChars =
+ MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, &result[0], numChars);
+ if (numConvertedChars != numChars) {
+ return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
+ }
+ return std::move(result);
+ }
+
D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func) {
switch (func) {
case wgpu::CompareFunction::Never:
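
ConvertStringToWstring above follows the standard two-pass MultiByteToWideChar idiom: first call with a null destination to size the output, then convert into the resized wstring, rejecting invalid UTF-8 via MB_ERR_INVALID_CHARS. A hypothetical call site (the label string and the resource variable are illustrative):

    // Hypothetical use: give a D3D12 object a debug name from a UTF-8 label.
    std::wstring wideLabel;
    DAWN_TRY_ASSIGN(wideLabel, ConvertStringToWstring("my upload buffer"));
    resource->SetName(wideLabel.c_str());  // ID3D12Object::SetName takes LPCWSTR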
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
index 36a5abe4ab3..d1559e72a14 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
@@ -23,6 +23,8 @@
namespace dawn_native { namespace d3d12 {
+ ResultOrError<std::wstring> ConvertStringToWstring(const char* str);
+
D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func);
D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h b/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h
index a64486c64bb..1c733c8256e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h
@@ -15,9 +15,15 @@
#ifndef DAWNNATIVE_D3D12_D3D12PLATFORM_H_
#define DAWNNATIVE_D3D12_D3D12PLATFORM_H_
+// Pre-emptively include windows.h but remove its macros so that they aren't set when declaring the
+// COM interfaces. Otherwise ID3D12InfoQueue::GetMessage would become either GetMessageA or
+// GetMessageW, which causes compilation errors.
+#include "common/windows_with_undefs.h"
+
#include <d3d11_2.h>
#include <d3d11on12.h>
#include <d3d12.h>
+#include <dxcapi.h>
#include <dxgi1_4.h>
#include <wrl.h>
@@ -28,10 +34,4 @@
using Microsoft::WRL::ComPtr;
-// Remove windows.h macros after d3d12's include of windows.h
-#include "common/Platform.h"
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include "common/windows_with_undefs.h"
-#endif
-
#endif // DAWNNATIVE_D3D12_D3D12PLATFORM_H_
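
The reordering matters because windows.h turns bare names like GetMessage into GetMessageA/GetMessageW macros, so any interface method with that name declared afterwards is silently renamed by the preprocessor. A minimal illustration of the failure mode the undef header prevents:

    #include <windows.h>  // defines GetMessage as GetMessageA or GetMessageW

    struct InfoQueueLike {
        // After preprocessing this declares GetMessageW() (or GetMessageA()),
        // so callers compiled without the macro can no longer link against it.
        void GetMessage();
    };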
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
index 63567a2fc70..a5e7c474fbf 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
@@ -214,7 +214,14 @@ namespace dawn_native { namespace metal {
if ([mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
mSupportedExtensions.EnableExtension(Extension::TextureCompressionBC);
}
+
+ if (@available(macOS 10.15, *)) {
+ mSupportedExtensions.EnableExtension(Extension::PipelineStatisticsQuery);
+ mSupportedExtensions.EnableExtension(Extension::TimestampQuery);
+ }
#endif
+
+ mSupportedExtensions.EnableExtension(Extension::ShaderFloat16);
}
id<MTLDevice> mDevice = nil;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h
index 081503b34ee..98bab96244c 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h
@@ -29,8 +29,6 @@ namespace dawn_native { namespace metal {
static ResultOrError<Buffer*> Create(Device* device, const BufferDescriptor* descriptor);
id<MTLBuffer> GetMTLBuffer() const;
- void OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite);
-
private:
using BufferBase::BufferBase;
MaybeError Initialize();
@@ -40,30 +38,14 @@ namespace dawn_native { namespace metal {
MaybeError MapWriteAsyncImpl(uint32_t serial) override;
void UnmapImpl() override;
void DestroyImpl() override;
+ void* GetMappedPointerImpl() override;
bool IsMapWritable() const override;
MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
- id<MTLBuffer> mMtlBuffer = nil;
- };
-
- class MapRequestTracker {
- public:
- MapRequestTracker(Device* device);
- ~MapRequestTracker();
+ void ClearBuffer(BufferBase::ClearValue clearValue);
- void Track(Buffer* buffer, uint32_t mapSerial, bool isWrite);
- void Tick(Serial finishedSerial);
-
- private:
- Device* mDevice;
-
- struct Request {
- Ref<Buffer> buffer;
- uint32_t mapSerial;
- bool isWrite;
- };
- SerialQueue<Request> mInflightRequests;
+ id<MTLBuffer> mMtlBuffer = nil;
};
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
index 5b577785a29..ccfd3b39ecb 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
@@ -24,6 +24,10 @@ namespace dawn_native { namespace metal {
// largest alignment of supported data types
static constexpr uint32_t kMinUniformOrStorageBufferAlignment = 16u;
+ // Fallback maximum buffer size used when neither the device's maximum buffer length
+ // nor the recommended working set size can be queried. This is a somewhat arbitrary
+ // limit of 1 GiB.
+ static constexpr uint32_t kMaxBufferSizeFallback = 1024u * 1024u * 1024u;
+
// static
ResultOrError<Buffer*> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
@@ -39,23 +43,53 @@ namespace dawn_native { namespace metal {
storageMode = MTLResourceStorageModePrivate;
}
- if (GetSize() >
- std::numeric_limits<uint64_t>::max() - kMinUniformOrStorageBufferAlignment) {
+ // TODO(cwallez@chromium.org): Have a global "zero" buffer that can do everything instead
+ // of creating a new 4-byte buffer?
+ if (GetSize() > std::numeric_limits<NSUInteger>::max()) {
return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
}
+ NSUInteger currentSize = static_cast<NSUInteger>(std::max(GetSize(), uint64_t(4u)));
- // TODO(cwallez@chromium.org): Have a global "zero" buffer that can do everything instead
- // of creating a new 4-byte buffer?
- uint32_t currentSize = std::max(GetSize(), uint64_t(4u));
// Metal validation layer requires the size of uniform buffer and storage buffer to be no
// less than the size of the buffer block defined in shader, and the overall size of the
// buffer must be aligned to the largest alignment of its members.
if (GetUsage() & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage)) {
+ if (currentSize >
+ std::numeric_limits<NSUInteger>::max() - kMinUniformOrStorageBufferAlignment) {
+ // Alignment would overflow.
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
currentSize = Align(currentSize, kMinUniformOrStorageBufferAlignment);
}
+ if (@available(iOS 12, macOS 10.14, *)) {
+ NSUInteger maxBufferSize = [ToBackend(GetDevice())->GetMTLDevice() maxBufferLength];
+ if (currentSize > maxBufferSize) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+ } else if (@available(macOS 10.12, *)) {
+ // |maxBufferLength| isn't always available on older systems. If available, use
+ // |recommendedMaxWorkingSetSize| instead. We can probably allocate more than this,
+ // but don't have a way to discover a better limit. MoltenVK also uses this heuristic.
+ uint64_t maxWorkingSetSize =
+ [ToBackend(GetDevice())->GetMTLDevice() recommendedMaxWorkingSetSize];
+ if (currentSize > maxWorkingSetSize) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+ } else if (currentSize > kMaxBufferSizeFallback) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+
mMtlBuffer = [ToBackend(GetDevice())->GetMTLDevice() newBufferWithLength:currentSize
options:storageMode];
+ if (mMtlBuffer == nil) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation failed");
+ }
+
+ if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ ClearBuffer(BufferBase::ClearValue::NonZero);
+ }
+
return {};
}
@@ -67,15 +101,6 @@ namespace dawn_native { namespace metal {
return mMtlBuffer;
}
- void Buffer::OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite) {
- char* data = reinterpret_cast<char*>([mMtlBuffer contents]);
- if (isWrite) {
- CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
- } else {
- CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
- }
- }
-
bool Buffer::IsMapWritable() const {
// TODO(enga): Handle CPU-visible memory on UMA
return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
@@ -87,17 +112,17 @@ namespace dawn_native { namespace metal {
}
MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
- MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapTracker();
- tracker->Track(this, serial, false);
return {};
}
MaybeError Buffer::MapWriteAsyncImpl(uint32_t serial) {
- MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapTracker();
- tracker->Track(this, serial, true);
return {};
}
+ void* Buffer::GetMappedPointerImpl() {
+ return reinterpret_cast<uint8_t*>([mMtlBuffer contents]);
+ }
+
void Buffer::UnmapImpl() {
// Nothing to do, Metal StorageModeShared buffers are always mapped.
}
@@ -107,29 +132,16 @@ namespace dawn_native { namespace metal {
mMtlBuffer = nil;
}
- MapRequestTracker::MapRequestTracker(Device* device) : mDevice(device) {
- }
-
- MapRequestTracker::~MapRequestTracker() {
- ASSERT(mInflightRequests.Empty());
- }
-
- void MapRequestTracker::Track(Buffer* buffer,
- uint32_t mapSerial,
- bool isWrite) {
- Request request;
- request.buffer = buffer;
- request.mapSerial = mapSerial;
- request.isWrite = isWrite;
+ void Buffer::ClearBuffer(BufferBase::ClearValue clearValue) {
+ // TODO(jiawei.shao@intel.com): support buffer lazy-initialization to 0.
+ ASSERT(clearValue == BufferBase::ClearValue::NonZero);
+ const uint8_t clearBufferValue = 1;
- mInflightRequests.Enqueue(std::move(request), mDevice->GetPendingCommandSerial());
- }
-
- void MapRequestTracker::Tick(Serial finishedSerial) {
- for (auto& request : mInflightRequests.IterateUpTo(finishedSerial)) {
- request.buffer->OnMapCommandSerialFinished(request.mapSerial, request.isWrite);
- }
- mInflightRequests.ClearUpTo(finishedSerial);
+ Device* device = ToBackend(GetDevice());
+ CommandRecordingContext* commandContext = device->GetPendingCommandContext();
+ [commandContext->EnsureBlit() fillBuffer:mMtlBuffer
+ range:NSMakeRange(0, GetSize())
+ value:clearBufferValue];
}
}} // namespace dawn_native::metal
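
The new allocation-size check degrades gracefully across OS versions: prefer the exact maxBufferLength query (iOS 12 / macOS 10.14+), fall back to recommendedMaxWorkingSetSize (macOS 10.12+), and finally to the hard-coded 1 GiB constant. A condensed sketch of that selection; the availability/query helpers are hypothetical stand-ins for the @available checks and MTLDevice calls in the real code:

    #include <cstdint>

    constexpr uint64_t kMaxBufferSizeFallback = 1024u * 1024u * 1024u;  // 1 GiB

    // Placeholder stand-ins for @available and the MTLDevice queries.
    bool HasMaxBufferLength() { return false; }
    bool HasRecommendedMaxWorkingSetSize() { return false; }
    uint64_t QueryMaxBufferLength() { return 0; }
    uint64_t QueryRecommendedMaxWorkingSetSize() { return 0; }

    uint64_t MaxBufferSize() {
        if (HasMaxBufferLength()) {
            return QueryMaxBufferLength();               // exact device limit
        }
        if (HasRecommendedMaxWorkingSetSize()) {
            return QueryRecommendedMaxWorkingSetSize();  // MoltenVK-style heuristic
        }
        return kMaxBufferSizeFallback;                   // conservative hard floor
    }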
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
index f27e7d4b0f1..64c098dce51 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
@@ -102,13 +102,14 @@ namespace dawn_native { namespace metal {
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
auto& attachmentInfo = renderPass->depthStencilAttachment;
- // TODO(jiawei.shao@intel.com): support rendering into a layer of a texture.
id<MTLTexture> texture =
ToBackend(attachmentInfo.view->GetTexture())->GetMTLTexture();
const Format& format = attachmentInfo.view->GetTexture()->GetFormat();
if (format.HasDepth()) {
descriptor.depthAttachment.texture = texture;
+ descriptor.depthAttachment.level = attachmentInfo.view->GetBaseMipLevel();
+ descriptor.depthAttachment.slice = attachmentInfo.view->GetBaseArrayLayer();
switch (attachmentInfo.depthStoreOp) {
case wgpu::StoreOp::Store:
@@ -142,6 +143,8 @@ namespace dawn_native { namespace metal {
if (format.HasStencil()) {
descriptor.stencilAttachment.texture = texture;
+ descriptor.stencilAttachment.level = attachmentInfo.view->GetBaseMipLevel();
+ descriptor.stencilAttachment.slice = attachmentInfo.view->GetBaseArrayLayer();
switch (attachmentInfo.stencilStoreOp) {
case wgpu::StoreOp::Store:
@@ -313,8 +316,8 @@ namespace dawn_native { namespace metal {
NSUInteger bufferOffset;
NSUInteger bytesPerRow;
NSUInteger bytesPerImage;
- MTLOrigin textureOrigin;
- MTLSize copyExtent;
+ Origin3D textureOrigin;
+ Extent3D copyExtent;
};
uint32_t count = 0;
@@ -325,11 +328,8 @@ namespace dawn_native { namespace metal {
return MTLOriginMake(origin.x, origin.y, origin.z);
}
- MTLSize MakeMTLSize(Extent3D extent) {
- return MTLSizeMake(extent.width, extent.height, extent.depth);
- }
-
- TextureBufferCopySplit ComputeTextureBufferCopySplit(Origin3D origin,
+ TextureBufferCopySplit ComputeTextureBufferCopySplit(wgpu::TextureDimension dimension,
+ Origin3D origin,
Extent3D copyExtent,
Format textureFormat,
Extent3D virtualSizeAtLevel,
@@ -372,6 +372,8 @@ namespace dawn_native { namespace metal {
? (virtualSizeAtLevel.height - origin.y)
: copyExtent.height;
+ ASSERT(dimension == wgpu::TextureDimension::e2D);
+
// Check whether buffer size is big enough.
bool needWorkaround = bufferSize - bufferOffset < bytesPerImage * copyExtent.depth;
if (!needWorkaround) {
@@ -379,9 +381,9 @@ namespace dawn_native { namespace metal {
copy.copies[0].bufferOffset = bufferOffset;
copy.copies[0].bytesPerRow = bytesPerRow;
copy.copies[0].bytesPerImage = bytesPerImage;
- copy.copies[0].textureOrigin = MakeMTLOrigin(origin);
- copy.copies[0].copyExtent =
- MTLSizeMake(clampedCopyExtentWidth, clampedCopyExtentHeight, copyExtent.depth);
+ copy.copies[0].textureOrigin = origin;
+ copy.copies[0].copyExtent = {clampedCopyExtentWidth, clampedCopyExtentHeight,
+ copyExtent.depth};
return copy;
}
@@ -392,9 +394,9 @@ namespace dawn_native { namespace metal {
copy.copies[copy.count].bufferOffset = currentOffset;
copy.copies[copy.count].bytesPerRow = bytesPerRow;
copy.copies[copy.count].bytesPerImage = bytesPerImage;
- copy.copies[copy.count].textureOrigin = MakeMTLOrigin(origin);
- copy.copies[copy.count].copyExtent = MTLSizeMake(
- clampedCopyExtentWidth, clampedCopyExtentHeight, copyExtent.depth - 1);
+ copy.copies[copy.count].textureOrigin = origin;
+ copy.copies[copy.count].copyExtent = {
+ clampedCopyExtentWidth, clampedCopyExtentHeight, copyExtent.depth - 1};
++copy.count;
@@ -408,12 +410,12 @@ namespace dawn_native { namespace metal {
copy.copies[copy.count].bufferOffset = currentOffset;
copy.copies[copy.count].bytesPerRow = bytesPerRow;
copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
- copy.copies[copy.count].textureOrigin =
- MTLOriginMake(origin.x, origin.y, origin.z + copyExtent.depth - 1);
+ copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
+ origin.z + copyExtent.depth - 1};
ASSERT(copyExtent.height - textureFormat.blockHeight < virtualSizeAtLevel.height);
- copy.copies[copy.count].copyExtent = MTLSizeMake(
- clampedCopyExtentWidth, copyExtent.height - textureFormat.blockHeight, 1);
+ copy.copies[copy.count].copyExtent = {
+ clampedCopyExtentWidth, copyExtent.height - textureFormat.blockHeight, 1};
++copy.count;
@@ -432,11 +434,11 @@ namespace dawn_native { namespace metal {
copy.copies[copy.count].bufferOffset = currentOffset;
copy.copies[copy.count].bytesPerRow = lastRowDataSize;
copy.copies[copy.count].bytesPerImage = lastRowDataSize;
- copy.copies[copy.count].textureOrigin =
- MTLOriginMake(origin.x, origin.y + copyExtent.height - textureFormat.blockHeight,
- origin.z + copyExtent.depth - 1);
- copy.copies[copy.count].copyExtent =
- MTLSizeMake(clampedCopyExtentWidth, lastRowCopyExtentHeight, 1);
+ copy.copies[copy.count].textureOrigin = {
+ origin.x, origin.y + copyExtent.height - textureFormat.blockHeight,
+ origin.z + copyExtent.depth - 1};
+ copy.copies[copy.count].copyExtent = {clampedCopyExtentWidth, lastRowCopyExtentHeight,
+ 1};
++copy.count;
return copy;
@@ -445,19 +447,18 @@ namespace dawn_native { namespace metal {
void EnsureSourceTextureInitialized(Texture* texture,
const Extent3D& size,
const TextureCopy& src) {
- // TODO(crbug.com/dawn/145): Specify multiple layers based on |size|
- texture->EnsureSubresourceContentInitialized(src.mipLevel, 1, src.arrayLayer, 1);
+ texture->EnsureSubresourceContentInitialized(
+ {src.mipLevel, 1, src.arrayLayer, size.depth});
}
void EnsureDestinationTextureInitialized(Texture* texture,
const Extent3D& size,
const TextureCopy& dst) {
- // TODO(crbug.com/dawn/145): Specify multiple layers based on |size|
+ SubresourceRange range = {dst.mipLevel, 1, dst.arrayLayer, size.depth};
if (IsCompleteSubresourceCopiedTo(texture, size, dst.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, dst.mipLevel, 1, dst.arrayLayer,
- 1);
+ texture->SetIsSubresourceContentInitialized(true, range);
} else {
- texture->EnsureSubresourceContentInitialized(dst.mipLevel, 1, dst.arrayLayer, 1);
+ texture->EnsureSubresourceContentInitialized(range);
}
}
@@ -473,7 +474,8 @@ namespace dawn_native { namespace metal {
template <typename Encoder>
void Apply(Encoder encoder) {
- for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ for (BindGroupIndex index :
+ IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
ApplyBindGroup(encoder, index, ToBackend(mBindGroups[index]),
mDynamicOffsetCounts[index], mDynamicOffsets[index].data(),
ToBackend(mPipelineLayout));
@@ -488,7 +490,7 @@ namespace dawn_native { namespace metal {
// two encoder types.
void ApplyBindGroupImpl(id<MTLRenderCommandEncoder> render,
id<MTLComputeCommandEncoder> compute,
- uint32_t index,
+ BindGroupIndex index,
BindGroup* group,
uint32_t dynamicOffsetCount,
uint64_t* dynamicOffsets,
@@ -498,7 +500,7 @@ namespace dawn_native { namespace metal {
// TODO(kainino@chromium.org): Maintain buffers and offsets arrays in BindGroup
// so that we only have to do one setVertexBuffers and one setFragmentBuffers
// call here.
- for (BindingIndex bindingIndex = 0;
+ for (BindingIndex bindingIndex{0};
bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
const BindingInfo& bindingInfo =
group->GetLayout()->GetBindingInfo(bindingIndex);
@@ -692,8 +694,7 @@ namespace dawn_native { namespace metal {
// cleared in CreateMTLRenderPassDescriptor by setting the loadop to clear when the
// texture subresource has not been initialized before the render pass.
if (!(usages.textureUsages[i].usage & wgpu::TextureUsage::OutputAttachment)) {
- texture->EnsureSubresourceContentInitialized(0, texture->GetNumMipLevels(), 0,
- texture->GetArrayLayers());
+ texture->EnsureSubresourceContentInitialized(texture->GetAllSubresources());
}
}
};
@@ -749,23 +750,42 @@ namespace dawn_native { namespace metal {
EnsureDestinationTextureInitialized(texture, copy->copySize, copy->destination);
- Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(dst.mipLevel);
- TextureBufferCopySplit splittedCopies = ComputeTextureBufferCopySplit(
- dst.origin, copySize, texture->GetFormat(), virtualSizeAtLevel,
- buffer->GetSize(), src.offset, src.bytesPerRow, src.rowsPerImage);
-
- for (uint32_t i = 0; i < splittedCopies.count; ++i) {
- const TextureBufferCopySplit::CopyInfo& copyInfo = splittedCopies.copies[i];
- [commandContext->EnsureBlit() copyFromBuffer:buffer->GetMTLBuffer()
- sourceOffset:copyInfo.bufferOffset
- sourceBytesPerRow:copyInfo.bytesPerRow
- sourceBytesPerImage:copyInfo.bytesPerImage
- sourceSize:copyInfo.copyExtent
- toTexture:texture->GetMTLTexture()
- destinationSlice:dst.arrayLayer
- destinationLevel:dst.mipLevel
- destinationOrigin:copyInfo.textureOrigin];
+ const Extent3D virtualSizeAtLevel =
+ texture->GetMipLevelVirtualSize(dst.mipLevel);
+
+ Origin3D copyOrigin = dst.origin;
+ copyOrigin.z = dst.arrayLayer;
+ TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
+ texture->GetDimension(), copyOrigin, copySize, texture->GetFormat(),
+ virtualSizeAtLevel, buffer->GetSize(), src.offset, src.bytesPerRow,
+ src.rowsPerImage);
+
+ for (uint32_t i = 0; i < splitCopies.count; ++i) {
+ const TextureBufferCopySplit::CopyInfo& copyInfo = splitCopies.copies[i];
+
+ const uint32_t copyBaseLayer = copyInfo.textureOrigin.z;
+ const uint32_t copyLayerCount = copyInfo.copyExtent.depth;
+ const MTLOrigin textureOrigin =
+ MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+ const MTLSize copyExtent =
+ MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+
+ uint64_t bufferOffset = copyInfo.bufferOffset;
+ for (uint32_t copyLayer = copyBaseLayer;
+ copyLayer < copyBaseLayer + copyLayerCount; ++copyLayer) {
+ [commandContext->EnsureBlit() copyFromBuffer:buffer->GetMTLBuffer()
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:copyExtent
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:copyLayer
+ destinationLevel:dst.mipLevel
+ destinationOrigin:textureOrigin];
+ bufferOffset += copyInfo.bytesPerImage;
+ }
}
+
break;
}
@@ -780,22 +800,39 @@ namespace dawn_native { namespace metal {
EnsureSourceTextureInitialized(texture, copy->copySize, copy->source);
Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(src.mipLevel);
- TextureBufferCopySplit splittedCopies = ComputeTextureBufferCopySplit(
- src.origin, copySize, texture->GetFormat(), virtualSizeAtLevel,
- buffer->GetSize(), dst.offset, dst.bytesPerRow, dst.rowsPerImage);
-
- for (uint32_t i = 0; i < splittedCopies.count; ++i) {
- const TextureBufferCopySplit::CopyInfo& copyInfo = splittedCopies.copies[i];
- [commandContext->EnsureBlit() copyFromTexture:texture->GetMTLTexture()
- sourceSlice:src.arrayLayer
- sourceLevel:src.mipLevel
- sourceOrigin:copyInfo.textureOrigin
- sourceSize:copyInfo.copyExtent
- toBuffer:buffer->GetMTLBuffer()
- destinationOffset:copyInfo.bufferOffset
- destinationBytesPerRow:copyInfo.bytesPerRow
- destinationBytesPerImage:copyInfo.bytesPerImage];
+ Origin3D copyOrigin = src.origin;
+ copyOrigin.z = src.arrayLayer;
+ TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
+ texture->GetDimension(), copyOrigin, copySize, texture->GetFormat(),
+ virtualSizeAtLevel, buffer->GetSize(), dst.offset, dst.bytesPerRow,
+ dst.rowsPerImage);
+
+ for (uint32_t i = 0; i < splitCopies.count; ++i) {
+ const TextureBufferCopySplit::CopyInfo& copyInfo = splitCopies.copies[i];
+
+ const uint32_t copyBaseLayer = copyInfo.textureOrigin.z;
+ const uint32_t copyLayerCount = copyInfo.copyExtent.depth;
+ const MTLOrigin textureOrigin =
+ MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+ const MTLSize copyExtent =
+ MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+
+ uint64_t bufferOffset = copyInfo.bufferOffset;
+ for (uint32_t copyLayer = copyBaseLayer;
+ copyLayer < copyBaseLayer + copyLayerCount; ++copyLayer) {
+ [commandContext->EnsureBlit() copyFromTexture:texture->GetMTLTexture()
+ sourceSlice:copyLayer
+ sourceLevel:src.mipLevel
+ sourceOrigin:textureOrigin
+ sourceSize:copyExtent
+ toBuffer:buffer->GetMTLBuffer()
+ destinationOffset:bufferOffset
+ destinationBytesPerRow:copyInfo.bytesPerRow
+ destinationBytesPerImage:copyInfo.bytesPerImage];
+ bufferOffset += copyInfo.bytesPerImage;
+ }
}
+
break;
}
@@ -809,16 +846,24 @@ namespace dawn_native { namespace metal {
EnsureDestinationTextureInitialized(dstTexture, copy->copySize,
copy->destination);
- [commandContext->EnsureBlit()
- copyFromTexture:srcTexture->GetMTLTexture()
- sourceSlice:copy->source.arrayLayer
- sourceLevel:copy->source.mipLevel
- sourceOrigin:MakeMTLOrigin(copy->source.origin)
- sourceSize:MakeMTLSize(copy->copySize)
- toTexture:dstTexture->GetMTLTexture()
- destinationSlice:copy->destination.arrayLayer
- destinationLevel:copy->destination.mipLevel
- destinationOrigin:MakeMTLOrigin(copy->destination.origin)];
+ // TODO(jiawei.shao@intel.com): support copies with 1D and 3D textures.
+ ASSERT(srcTexture->GetDimension() == wgpu::TextureDimension::e2D &&
+ dstTexture->GetDimension() == wgpu::TextureDimension::e2D);
+ const MTLSize mtlSizeOneLayer =
+ MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
+ for (uint32_t slice = 0; slice < copy->copySize.depth; ++slice) {
+ [commandContext->EnsureBlit()
+ copyFromTexture:srcTexture->GetMTLTexture()
+ sourceSlice:copy->source.arrayLayer + slice
+ sourceLevel:copy->source.mipLevel
+ sourceOrigin:MakeMTLOrigin(copy->source.origin)
+ sourceSize:mtlSizeOneLayer
+ toTexture:dstTexture->GetMTLTexture()
+ destinationSlice:copy->destination.arrayLayer + slice
+ destinationLevel:copy->destination.mipLevel
+ destinationOrigin:MakeMTLOrigin(copy->destination.origin)];
+ }
+
break;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm
index 33df959693f..971691a4b24 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm
@@ -47,7 +47,7 @@ namespace dawn_native { namespace metal {
return nil;
}
- // A blit encoder can be left open from SetSubData, make sure we close it.
+        // A blit encoder can be left open from WriteBuffer, so make sure we close it.
EndBlit();
ASSERT(!mInEncoder);
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
index b09926a8579..76e41518918 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
@@ -32,8 +32,6 @@
namespace dawn_native { namespace metal {
- class MapRequestTracker;
-
class Device : public DeviceBase {
public:
static ResultOrError<Device*> Create(AdapterBase* adapter,
@@ -54,8 +52,6 @@ namespace dawn_native { namespace metal {
CommandRecordingContext* GetPendingCommandContext();
void SubmitPendingCommandBuffer();
- MapRequestTracker* GetMapTracker() const;
-
TextureBase* CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
IOSurfaceRef ioSurface,
uint32_t plane);
@@ -80,6 +76,8 @@ namespace dawn_native { namespace metal {
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
@@ -104,7 +102,6 @@ namespace dawn_native { namespace metal {
id<MTLDevice> mMtlDevice = nil;
id<MTLCommandQueue> mCommandQueue = nil;
- std::unique_ptr<MapRequestTracker> mMapTracker;
CommandRecordingContext mCommandContext;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
index 905832e4ca4..d8d0feb6a16 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
@@ -51,7 +51,6 @@ namespace dawn_native { namespace metal {
const DeviceDescriptor* descriptor)
: DeviceBase(adapter, descriptor),
mMtlDevice([mtlDevice retain]),
- mMapTracker(new MapRequestTracker(this)),
mCompletedSerial(0) {
[mMtlDevice retain];
}
@@ -124,6 +123,9 @@ namespace dawn_native { namespace metal {
const PipelineLayoutDescriptor* descriptor) {
return new PipelineLayout(this, descriptor);
}
+ ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation");
+ }
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
return RenderPipeline::Create(this, descriptor);
@@ -156,9 +158,9 @@ namespace dawn_native { namespace metal {
Serial Device::CheckAndUpdateCompletedSerials() {
if (GetCompletedCommandSerial() > mCompletedSerial) {
- // sometimes we artificially increase the serials, in which case the completed serial in
+ // sometimes we increase the serials, in which case the completed serial in
// the device base will surpass the completed serial we have in the metal backend, so we
- // must update ours when we see that the completed serial from the frontend has
+            // must update ours when we see that the completed serial from the device base has
// increased.
mCompletedSerial = GetCompletedCommandSerial();
}
@@ -167,17 +169,8 @@ namespace dawn_native { namespace metal {
}
MaybeError Device::TickImpl() {
- CheckPassedSerials();
- Serial completedSerial = GetCompletedCommandSerial();
-
- mMapTracker->Tick(completedSerial);
-
if (mCommandContext.GetCommands() != nil) {
SubmitPendingCommandBuffer();
- } else if (completedSerial == GetLastSubmittedCommandSerial()) {
- // If there's no GPU work in flight we still need to artificially increment the serial
- // so that CPU operations waiting on GPU completion can know they don't have to wait.
- ArtificiallyIncrementSerials();
}
return {};
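
Note: TickImpl shrinks because serial bookkeeping (including the "artificial"
increments deleted above) now lives in DeviceBase, and map requests are tracked by the
frontend rather than a per-backend MapRequestTracker. The invariant being relied on is
unchanged; a hypothetical helper states it:

    #include <cstdint>

    // Illustrative, not Dawn code: work tagged with submit serial S may complete
    // on the CPU side once the device has observed completion of serial S.
    bool CanComplete(uint64_t lastUsageSerial, uint64_t completedSerial) {
        return completedSerial >= lastUsageSerial;
    }
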
@@ -245,10 +238,6 @@ namespace dawn_native { namespace metal {
[pendingCommands release];
}
- MapRequestTracker* Device::GetMapTracker() const {
- return mMapTracker.get();
- }
-
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
std::unique_ptr<StagingBufferBase> stagingBuffer =
std::make_unique<StagingBuffer>(size, this);
@@ -261,10 +250,9 @@ namespace dawn_native { namespace metal {
BufferBase* destination,
uint64_t destinationOffset,
uint64_t size) {
- // Metal validation layers forbid 0-sized copies, skip it since it is a noop.
- if (size == 0) {
- return {};
- }
+        // Metal validation layers forbid 0-sized copies; assert that the caller
+        // already skipped them before calling this function.
+ ASSERT(size != 0);
id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
@@ -281,6 +269,15 @@ namespace dawn_native { namespace metal {
uint32_t plane) {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+
+ // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
+ // GPUTextureDescriptor.size.depth is done.
+ TextureDescriptor fixedDescriptor;
+ if (ConsumedError(FixTextureDescriptor(this, textureDescriptor), &fixedDescriptor)) {
+ return nullptr;
+ }
+ textureDescriptor = &fixedDescriptor;
+
if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
return nullptr;
}
@@ -307,14 +304,6 @@ namespace dawn_native { namespace metal {
CheckPassedSerials();
}
- // Artificially increase the serials so work that was pending knows it can complete.
- ArtificiallyIncrementSerials();
-
- DAWN_TRY(TickImpl());
-
- // Force all operations to look as if they were completed
- AssumeCommandsComplete();
-
return {};
}
@@ -323,8 +312,6 @@ namespace dawn_native { namespace metal {
[mCommandContext.AcquireCommands() release];
- mMapTracker = nullptr;
-
[mCommandQueue release];
mCommandQueue = nil;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/Forward.h b/chromium/third_party/dawn/src/dawn_native/metal/Forward.h
index a773a182c89..9481348f520 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/Forward.h
@@ -28,6 +28,7 @@ namespace dawn_native { namespace metal {
class Device;
class Framebuffer;
class PipelineLayout;
+ class QuerySet;
class Queue;
class RenderPipeline;
class Sampler;
@@ -46,6 +47,7 @@ namespace dawn_native { namespace metal {
using ComputePipelineType = ComputePipeline;
using DeviceType = Device;
using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
using QueueType = Queue;
using RenderPipelineType = RenderPipeline;
using SamplerType = Sampler;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
index 6f2f64d8f94..7a3ad8084b3 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
@@ -15,6 +15,8 @@
#ifndef DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
#define DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
+#include "common/ityp_array.h"
+#include "dawn_native/BindingInfo.h"
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/PerStage.h"
@@ -41,7 +43,9 @@ namespace dawn_native { namespace metal {
PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
using BindingIndexInfo =
- std::array<std::array<uint32_t, kMaxBindingsPerGroup>, kMaxBindGroups>;
+ ityp::array<BindGroupIndex,
+ ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup>,
+ kMaxBindGroups>;
const BindingIndexInfo& GetBindingIndexInfo(SingleShaderStage stage) const;
// The number of Metal vertex stage buffers used for the whole pipeline layout.
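
Note: BindingIndexInfo swaps nested std::array for ityp::array keyed by BindGroupIndex
and BindingIndex, so subscripting with the wrong kind of index becomes a compile error
instead of a silent off-by-one. A minimal sketch of the idea (illustrative; the real
implementation is common/TypedInteger.h and common/ityp_array.h):

    #include <cstdint>

    template <typename Tag, typename T>
    struct TypedInt {
        explicit TypedInt(T v) : value(v) {}
        T value;
    };
    using BindGroupIndex = TypedInt<struct BindGroupTag, uint32_t>;
    using BindingIndex = TypedInt<struct BindingTag, uint32_t>;

    // An array keyed on BindGroupIndex only accepts BindGroupIndex in
    // operator[], so indices[bindingIndex] on it no longer compiles.
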
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
index 9b2c6b285ee..3ee7d92bb1b 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
@@ -28,8 +28,8 @@ namespace dawn_native { namespace metal {
uint32_t samplerIndex = 0;
uint32_t textureIndex = 0;
- for (uint32_t group : IterateBitSet(GetBindGroupLayoutsMask())) {
- for (BindingIndex bindingIndex = 0;
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ for (BindingIndex bindingIndex{0};
bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
const BindingInfo& bindingInfo =
GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
index d0a8bfc97ac..5983e9ea82d 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
@@ -67,6 +67,7 @@ namespace dawn_native { namespace metal {
}
MaybeError ShaderModule::Initialize() {
+ DAWN_TRY(InitializeBase());
const std::vector<uint32_t>& spirv = GetSpirv();
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
@@ -131,7 +132,7 @@ namespace dawn_native { namespace metal {
// a table of MSLResourceBinding to give to SPIRV-Cross.
// Create one resource binding entry per stage per binding.
- for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
const BindGroupLayoutBase::BindingMap& bindingMap =
layout->GetBindGroupLayout(group)->GetBindingMap();
@@ -147,8 +148,8 @@ namespace dawn_native { namespace metal {
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
shaderc_spvc_msl_resource_binding mslBinding;
mslBinding.stage = ToSpvcExecutionModel(stage);
- mslBinding.desc_set = group;
- mslBinding.binding = bindingNumber;
+ mslBinding.desc_set = static_cast<uint32_t>(group);
+ mslBinding.binding = static_cast<uint32_t>(bindingNumber);
mslBinding.msl_buffer = mslBinding.msl_texture = mslBinding.msl_sampler =
shaderIndex;
DAWN_TRY(CheckSpvcSuccess(mSpvcContext.AddMSLResourceBinding(mslBinding),
@@ -156,8 +157,8 @@ namespace dawn_native { namespace metal {
} else {
spirv_cross::MSLResourceBinding mslBinding;
mslBinding.stage = SpirvExecutionModelForStage(stage);
- mslBinding.desc_set = group;
- mslBinding.binding = bindingNumber;
+ mslBinding.desc_set = static_cast<uint32_t>(group);
+ mslBinding.binding = static_cast<uint32_t>(bindingNumber);
mslBinding.msl_buffer = mslBinding.msl_texture = mslBinding.msl_sampler =
shaderIndex;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
index adfd6322dce..19abc7facdf 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
@@ -32,7 +32,7 @@ namespace dawn_native { namespace metal {
protected:
~OldSwapChain() override;
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureBase* texture) override;
+ MaybeError OnBeforePresent(TextureViewBase* view) override;
};
class SwapChain final : public NewSwapChainBase {
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
index 2a5ffb12431..f581da98354 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
@@ -51,7 +51,7 @@ namespace dawn_native { namespace metal {
return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture);
}
- MaybeError OldSwapChain::OnBeforePresent(TextureBase*) {
+ MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
index 8e4bce9bd3b..7ace3963a0a 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
@@ -42,21 +42,14 @@ namespace dawn_native { namespace metal {
id<MTLTexture> GetMTLTexture();
- void EnsureSubresourceContentInitialized(uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount);
+ void EnsureSubresourceContentInitialized(const SubresourceRange& range);
private:
~Texture() override;
void DestroyImpl() override;
- MaybeError ClearTexture(uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
- TextureBase::ClearValue clearValue);
+ MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
id<MTLTexture> mMtlTexture = nil;
};
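
Note: the signatures above collapse four loose uint32_t parameters into a
SubresourceRange. Judging from the brace-initializers used elsewhere in this patch
({0, 1, 0, 1}, {mipLevel, 1, arrayLayer, depth}) and the SingleSubresource calls, the
aggregate looks roughly like this sketch:

    #include <cstdint>

    struct SubresourceRange {
        uint32_t baseMipLevel;
        uint32_t levelCount;
        uint32_t baseArrayLayer;
        uint32_t layerCount;

        static SubresourceRange SingleSubresource(uint32_t level, uint32_t layer) {
            return {level, 1, layer, 1};
        }
    };
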
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
index 46fe75b6bc2..454a14b2739 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
@@ -54,22 +54,6 @@ namespace dawn_native { namespace metal {
return result;
}
- MTLTextureType MetalTextureType(wgpu::TextureDimension dimension,
- unsigned int arrayLayers,
- unsigned int sampleCount) {
- switch (dimension) {
- case wgpu::TextureDimension::e2D:
- if (sampleCount > 1) {
- ASSERT(arrayLayers == 1);
- return MTLTextureType2DMultisample;
- } else {
- return (arrayLayers > 1) ? MTLTextureType2DArray : MTLTextureType2D;
- }
- default:
- UNREACHABLE();
- }
- }
-
MTLTextureType MetalTextureViewType(wgpu::TextureViewDimension dimension,
unsigned int sampleCount) {
switch (dimension) {
@@ -275,7 +259,7 @@ namespace dawn_native { namespace metal {
return DAWN_VALIDATION_ERROR("IOSurface mip level count must be 1");
}
- if (descriptor->arrayLayerCount != 1) {
+ if (descriptor->size.depth != 1) {
return DAWN_VALIDATION_ERROR("IOSurface array layer count must be 1");
}
@@ -301,20 +285,38 @@ namespace dawn_native { namespace metal {
MTLTextureDescriptor* CreateMetalTextureDescriptor(const TextureDescriptor* descriptor) {
MTLTextureDescriptor* mtlDesc = [MTLTextureDescriptor new];
- mtlDesc.textureType = MetalTextureType(descriptor->dimension, descriptor->arrayLayerCount,
- descriptor->sampleCount);
- mtlDesc.usage = MetalTextureUsage(descriptor->usage);
- mtlDesc.pixelFormat = MetalPixelFormat(descriptor->format);
mtlDesc.width = descriptor->size.width;
mtlDesc.height = descriptor->size.height;
- mtlDesc.depth = descriptor->size.depth;
-
+ mtlDesc.sampleCount = descriptor->sampleCount;
+ mtlDesc.usage = MetalTextureUsage(descriptor->usage);
+ mtlDesc.pixelFormat = MetalPixelFormat(descriptor->format);
mtlDesc.mipmapLevelCount = descriptor->mipLevelCount;
- mtlDesc.arrayLength = descriptor->arrayLayerCount;
mtlDesc.storageMode = MTLStorageModePrivate;
- mtlDesc.sampleCount = descriptor->sampleCount;
+ // Choose the correct MTLTextureType and paper over differences in how the array layer count
+ // is specified.
+ mtlDesc.depth = descriptor->size.depth;
+ mtlDesc.arrayLength = 1;
+ switch (descriptor->dimension) {
+ case wgpu::TextureDimension::e2D:
+ if (mtlDesc.depth > 1) {
+ ASSERT(mtlDesc.sampleCount == 1);
+ mtlDesc.textureType = MTLTextureType2DArray;
+ mtlDesc.arrayLength = mtlDesc.depth;
+ mtlDesc.depth = 1;
+ } else {
+ if (mtlDesc.sampleCount > 1) {
+ mtlDesc.textureType = MTLTextureType2DMultisample;
+ } else {
+ mtlDesc.textureType = MTLTextureType2D;
+ }
+ }
+ break;
+
+ default:
+ UNREACHABLE();
+ }
return mtlDesc;
}
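
Note: with arrayLayerCount folded into size.depth, the descriptor code above has to
split depth back out for Metal, which distinguishes arrayLength (2D arrays) from depth
(3D textures). A distilled sketch of the 2D-only rule implemented above (names are
illustrative):

    #include <cassert>
    #include <cstdint>

    enum class MtlType2D { e2D, e2DArray, e2DMultisample };

    MtlType2D Choose2DType(uint32_t depth, uint32_t sampleCount) {
        if (depth > 1) {
            assert(sampleCount == 1);    // multisampled arrays are rejected
            return MtlType2D::e2DArray;  // arrayLength = depth, depth = 1
        }
        return sampleCount > 1 ? MtlType2D::e2DMultisample : MtlType2D::e2D;
    }
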
@@ -326,8 +328,8 @@ namespace dawn_native { namespace metal {
[mtlDesc release];
if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- device->ConsumedError(ClearTexture(0, GetNumMipLevels(), 0, GetArrayLayers(),
- TextureBase::ClearValue::NonZero));
+ device->ConsumedError(
+ ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
}
}
@@ -351,7 +353,7 @@ namespace dawn_native { namespace metal {
plane:plane];
[mtlDesc release];
- SetIsSubresourceContentInitialized(descriptor->isCleared, 0, 1, 0, 1);
+ SetIsSubresourceContentInitialized(descriptor->isCleared, {0, 1, 0, 1});
}
Texture::~Texture() {
@@ -369,10 +371,7 @@ namespace dawn_native { namespace metal {
return mMtlTexture;
}
- MaybeError Texture::ClearTexture(uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
+ MaybeError Texture::ClearTexture(const SubresourceRange& range,
TextureBase::ClearValue clearValue) {
Device* device = ToBackend(GetDevice());
@@ -389,11 +388,13 @@ namespace dawn_native { namespace metal {
if (GetFormat().HasDepthOrStencil()) {
// Create a render pass to clear each subresource.
- for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
- for (uint32_t arrayLayer = baseArrayLayer;
- arrayLayer < baseArrayLayer + layerCount; arrayLayer++) {
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, arrayLayer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, arrayLayer))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -421,16 +422,19 @@ namespace dawn_native { namespace metal {
}
} else {
ASSERT(GetFormat().IsColor());
- MTLRenderPassDescriptor* descriptor = nil;
- uint32_t attachment = 0;
-
- // Create multiple render passes with each subresource as a color attachment to
- // clear them all.
- for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
- for (uint32_t arrayLayer = baseArrayLayer;
- arrayLayer < baseArrayLayer + layerCount; arrayLayer++) {
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
+ // Create multiple render passes with each subresource as a color attachment to
+ // clear them all. Only do this for array layers to ensure all attachments have
+ // the same size.
+ MTLRenderPassDescriptor* descriptor = nil;
+ uint32_t attachment = 0;
+
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, arrayLayer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, arrayLayer))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -456,16 +460,16 @@ namespace dawn_native { namespace metal {
descriptor = nil;
}
}
- }
- if (descriptor != nil) {
- commandContext->BeginRender(descriptor);
- commandContext->EndRender();
+ if (descriptor != nil) {
+ commandContext->BeginRender(descriptor);
+ commandContext->EndRender();
+ }
}
}
} else {
// Compute the buffer size big enough to fill the largest mip.
- Extent3D largestMipSize = GetMipLevelVirtualSize(baseMipLevel);
+ Extent3D largestMipSize = GetMipLevelVirtualSize(range.baseMipLevel);
// Metal validation layers: sourceBytesPerRow must be at least 64.
uint32_t largestMipBytesPerRow = std::max(
@@ -495,13 +499,15 @@ namespace dawn_native { namespace metal {
id<MTLBuffer> uploadBuffer = ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
// Encode a buffer to texture copy to clear each subresource.
- for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
Extent3D virtualSize = GetMipLevelVirtualSize(level);
- for (uint32_t arrayLayer = baseArrayLayer; arrayLayer < baseArrayLayer + layerCount;
- ++arrayLayer) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, arrayLayer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, arrayLayer))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -538,26 +544,20 @@ namespace dawn_native { namespace metal {
}
if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
- layerCount);
+ SetIsSubresourceContentInitialized(true, range);
device->IncrementLazyClearCountForTesting();
}
return {};
}
- void Texture::EnsureSubresourceContentInitialized(uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount) {
+ void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
return;
}
- if (!IsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
- layerCount)) {
+ if (!IsSubresourceContentInitialized(range)) {
// If subresource has not been initialized, clear it to black as it could
// contain dirty bits from recycled memory
- GetDevice()->ConsumedError(ClearTexture(baseMipLevel, levelCount, baseArrayLayer,
- layerCount, TextureBase::ClearValue::Zero));
+ GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
index 0dac43e1708..adc44f0974c 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
@@ -116,6 +116,9 @@ namespace dawn_native { namespace null {
const PipelineLayoutDescriptor* descriptor) {
return new PipelineLayout(this, descriptor);
}
+ ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return new QuerySet(this, descriptor);
+ }
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
return new RenderPipeline(this, descriptor);
@@ -185,8 +188,7 @@ namespace dawn_native { namespace null {
}
MaybeError Device::WaitForIdleForDestruction() {
- // Fake all commands being completed
- AssumeCommandsComplete();
+ mPendingOperations.clear();
return {};
}
@@ -266,7 +268,7 @@ namespace dawn_native { namespace null {
struct BufferMapOperation : PendingOperation {
virtual void Execute() {
- buffer->MapOperationCompleted(serial, ptr, isWrite);
+ buffer->OnMapCommandSerialFinished(serial, isWrite);
}
Ref<Buffer> buffer;
@@ -296,14 +298,6 @@ namespace dawn_native { namespace null {
return {};
}
- void Buffer::MapOperationCompleted(uint32_t serial, void* ptr, bool isWrite) {
- if (isWrite) {
- CallMapWriteCallback(serial, WGPUBufferMapAsyncStatus_Success, ptr, GetSize());
- } else {
- CallMapReadCallback(serial, WGPUBufferMapAsyncStatus_Success, ptr, GetSize());
- }
- }
-
void Buffer::CopyFromStaging(StagingBufferBase* staging,
uint64_t sourceOffset,
uint64_t destinationOffset,
@@ -312,11 +306,10 @@ namespace dawn_native { namespace null {
memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
}
- MaybeError Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const void* data) {
- ASSERT(start + count <= GetSize());
+ void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
+ ASSERT(bufferOffset + size <= GetSize());
ASSERT(mBackingData);
- memcpy(mBackingData.get() + start, data, count);
- return {};
+ memcpy(mBackingData.get() + bufferOffset, data, size);
}
MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
@@ -341,6 +334,10 @@ namespace dawn_native { namespace null {
ToBackend(GetDevice())->AddPendingOperation(std::move(operation));
}
+ void* Buffer::GetMappedPointerImpl() {
+ return mBackingData.get();
+ }
+
void Buffer::UnmapImpl() {
}
@@ -357,6 +354,19 @@ namespace dawn_native { namespace null {
FreeCommands(&mCommands);
}
+ // QuerySet
+
+ QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+ : QuerySetBase(device, descriptor) {
+ }
+
+ QuerySet::~QuerySet() {
+ DestroyInternal();
+ }
+
+ void QuerySet::DestroyImpl() {
+ }
+
// Queue
Queue::Queue(Device* device) : QueueBase(device) {
@@ -370,6 +380,14 @@ namespace dawn_native { namespace null {
return {};
}
+ MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
+ return {};
+ }
+
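
Note: WriteBufferImpl is the queue-side replacement for the removed
Buffer::SetSubDataImpl; in this null backend it bottoms out in DoWriteBuffer's memcpy.
A hypothetical mirror of that path, with the bounds check made explicit:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Illustrative only: the null backend's write is a bounds-checked memcpy
    // into CPU-side backing storage.
    void DoWriteBuffer(uint8_t* backing, size_t backingSize,
                       uint64_t bufferOffset, const void* data, size_t size) {
        if (bufferOffset + size <= backingSize) {
            std::memcpy(backing + bufferOffset, data, size);
        }
    }
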
// SwapChain
SwapChain::SwapChain(Device* device,
@@ -425,7 +443,7 @@ namespace dawn_native { namespace null {
return GetDevice()->CreateTexture(descriptor);
}
- MaybeError OldSwapChain::OnBeforePresent(TextureBase*) {
+ MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
index a78c8a31793..f6f361185c8 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
@@ -24,6 +24,7 @@
#include "dawn_native/ComputePipeline.h"
#include "dawn_native/Device.h"
#include "dawn_native/PipelineLayout.h"
+#include "dawn_native/QuerySet.h"
#include "dawn_native/Queue.h"
#include "dawn_native/RenderPipeline.h"
#include "dawn_native/RingBufferAllocator.h"
@@ -45,6 +46,7 @@ namespace dawn_native { namespace null {
using ComputePipeline = ComputePipelineBase;
class Device;
using PipelineLayout = PipelineLayoutBase;
+ class QuerySet;
class Queue;
using RenderPipeline = RenderPipelineBase;
using Sampler = SamplerBase;
@@ -62,6 +64,7 @@ namespace dawn_native { namespace null {
using ComputePipelineType = ComputePipeline;
using DeviceType = Device;
using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
using QueueType = Queue;
using RenderPipelineType = RenderPipeline;
using SamplerType = Sampler;
@@ -118,6 +121,8 @@ namespace dawn_native { namespace null {
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
@@ -182,17 +187,17 @@ namespace dawn_native { namespace null {
public:
Buffer(Device* device, const BufferDescriptor* descriptor);
- void MapOperationCompleted(uint32_t serial, void* ptr, bool isWrite);
void CopyFromStaging(StagingBufferBase* staging,
uint64_t sourceOffset,
uint64_t destinationOffset,
uint64_t size);
+ void DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size);
+
private:
~Buffer() override;
// Dawn API
- MaybeError SetSubDataImpl(uint32_t start, uint32_t count, const void* data) override;
MaybeError MapReadAsyncImpl(uint32_t serial) override;
MaybeError MapWriteAsyncImpl(uint32_t serial) override;
void UnmapImpl() override;
@@ -201,6 +206,7 @@ namespace dawn_native { namespace null {
bool IsMapWritable() const override;
MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
void MapAsyncImplCommon(uint32_t serial, bool isWrite);
+ void* GetMappedPointerImpl() override;
std::unique_ptr<uint8_t[]> mBackingData;
};
@@ -215,6 +221,16 @@ namespace dawn_native { namespace null {
CommandIterator mCommands;
};
+ class QuerySet final : public QuerySetBase {
+ public:
+ QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+
+ private:
+ ~QuerySet() override;
+
+ void DestroyImpl() override;
+ };
+
class Queue final : public QueueBase {
public:
Queue(Device* device);
@@ -222,6 +238,10 @@ namespace dawn_native { namespace null {
private:
~Queue() override;
MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) override;
};
class SwapChain final : public NewSwapChainBase {
@@ -248,7 +268,7 @@ namespace dawn_native { namespace null {
protected:
~OldSwapChain() override;
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureBase*) override;
+ MaybeError OnBeforePresent(TextureViewBase*) override;
};
class NativeSwapChainImpl {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
index 383607bff12..d96baea233d 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
@@ -14,11 +14,54 @@
#include "dawn_native/opengl/BindGroupGL.h"
+#include "dawn_native/Texture.h"
#include "dawn_native/opengl/BindGroupLayoutGL.h"
#include "dawn_native/opengl/DeviceGL.h"
namespace dawn_native { namespace opengl {
+ MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor) {
+ const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+ for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+ const BindGroupEntry& entry = descriptor->entries[i];
+
+ const auto& it = bindingMap.find(BindingNumber(entry.binding));
+ BindingIndex bindingIndex = it->second;
+ ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+
+ const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
+ switch (bindingInfo.type) {
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ ASSERT(entry.textureView != nullptr);
+ const uint32_t textureViewLayerCount = entry.textureView->GetLayerCount();
+ if (textureViewLayerCount != 1 &&
+ textureViewLayerCount !=
+ entry.textureView->GetTexture()->GetArrayLayers()) {
+ return DAWN_VALIDATION_ERROR(
+                        "The OpenGL backend currently only supports binding either a single "
+                        "layer or the entire texture as a storage texture.");
+ }
+ } break;
+
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::ComparisonSampler:
+ break;
+
+ case wgpu::BindingType::StorageTexture:
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ return {};
+ }
+
BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
: BindGroupBase(this, device, descriptor) {
}
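
Note: ValidateGLBindGroupDescriptor enforces the all-or-one layer rule that
glBindImageTexture imposes (see CommandBufferGL.cpp below). For a storage-texture
binding of a texture with 6 array layers, the rule works out as:

    // view layerCount == 1 -> valid (binds one layer,  layered = GL_FALSE)
    // view layerCount == 6 -> valid (binds all layers, layered = GL_TRUE)
    // view layerCount == 3 -> DAWN_VALIDATION_ERROR from the check above
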
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
index ad69b640b63..f9f11514100 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
@@ -23,6 +23,8 @@ namespace dawn_native { namespace opengl {
class BindGroupLayout;
class Device;
+ MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor);
+
class BindGroup final : public BindGroupBase, public PlacementAllocated {
public:
BindGroup(Device* device, const BindGroupDescriptor* descriptor);
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
index 80311af20c7..7e91a4940cd 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
@@ -22,9 +22,19 @@ namespace dawn_native { namespace opengl {
Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
: BufferBase(device, descriptor) {
+ // TODO(cwallez@chromium.org): Have a global "zero" buffer instead of creating a new 4-byte
+ // buffer?
+ uint64_t size = std::max(GetSize(), uint64_t(4u));
+
device->gl.GenBuffers(1, &mBuffer);
device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- device->gl.BufferData(GL_ARRAY_BUFFER, GetSize(), nullptr, GL_STATIC_DRAW);
+
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ std::vector<uint8_t> clearValues(size, 1u);
+ device->gl.BufferData(GL_ARRAY_BUFFER, size, clearValues.data(), GL_STATIC_DRAW);
+ } else {
+ device->gl.BufferData(GL_ARRAY_BUFFER, size, nullptr, GL_STATIC_DRAW);
+ }
}
Buffer::~Buffer() {
@@ -45,16 +55,8 @@ namespace dawn_native { namespace opengl {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- void* data = gl.MapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
- *mappedPointer = reinterpret_cast<uint8_t*>(data);
- return {};
- }
-
- MaybeError Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const void* data) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- gl.BufferSubData(GL_ARRAY_BUFFER, start, count, data);
+ mMappedData = gl.MapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
+ *mappedPointer = reinterpret_cast<uint8_t*>(mMappedData);
return {};
}
@@ -64,8 +66,7 @@ namespace dawn_native { namespace opengl {
// TODO(cwallez@chromium.org): this does GPU->CPU synchronization, we could require a high
// version of OpenGL that would let us map the buffer unsynchronized.
gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- void* data = gl.MapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
- CallMapReadCallback(serial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
+ mMappedData = gl.MapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
return {};
}
@@ -75,16 +76,20 @@ namespace dawn_native { namespace opengl {
// TODO(cwallez@chromium.org): this does GPU->CPU synchronization, we could require a high
// version of OpenGL that would let us map the buffer unsynchronized.
gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- void* data = gl.MapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
- CallMapWriteCallback(serial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
+ mMappedData = gl.MapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
return {};
}
+ void* Buffer::GetMappedPointerImpl() {
+ return mMappedData;
+ }
+
void Buffer::UnmapImpl() {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
gl.UnmapBuffer(GL_ARRAY_BUFFER);
+ mMappedData = nullptr;
}
void Buffer::DestroyImpl() {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h
index 2211a5c560a..9949829a4be 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h
@@ -32,7 +32,6 @@ namespace dawn_native { namespace opengl {
private:
~Buffer() override;
// Dawn API
- MaybeError SetSubDataImpl(uint32_t start, uint32_t count, const void* data) override;
MaybeError MapReadAsyncImpl(uint32_t serial) override;
MaybeError MapWriteAsyncImpl(uint32_t serial) override;
void UnmapImpl() override;
@@ -40,8 +39,10 @@ namespace dawn_native { namespace opengl {
bool IsMapWritable() const override;
MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ void* GetMappedPointerImpl() override;
GLuint mBuffer = 0;
+ void* mMappedData = nullptr;
};
}} // namespace dawn_native::opengl
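
Note: across backends in this patch, Map*AsyncImpl stops invoking the map callbacks
directly; it only stashes the mapped pointer, and the frontend fetches it through the
new GetMappedPointerImpl once the map serial completes. A minimal sketch of that
inversion (illustrative, not the actual frontend code):

    #include <functional>

    struct MappableBuffer {
        void* mMappedData = nullptr;               // set by Map*AsyncImpl
        std::function<void(void*)> mUserCallback;  // user's map-async callback

        void OnMapSerialCompleted() {
            // The frontend pulls the pointer instead of the backend pushing it.
            mUserCallback(mMappedData);
        }
    };
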
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
index 9ee4e08b2e6..28c606adeac 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
@@ -225,7 +225,8 @@ namespace dawn_native { namespace opengl {
}
void Apply(const OpenGLFunctions& gl) {
- for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ for (BindGroupIndex index :
+ IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
ApplyBindGroup(gl, index, mBindGroups[index], mDynamicOffsetCounts[index],
mDynamicOffsets[index].data());
}
@@ -234,14 +235,14 @@ namespace dawn_native { namespace opengl {
private:
void ApplyBindGroup(const OpenGLFunctions& gl,
- uint32_t index,
+ BindGroupIndex index,
BindGroupBase* group,
uint32_t dynamicOffsetCount,
uint64_t* dynamicOffsets) {
const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
uint32_t currentDynamicOffsetIndex = 0;
- for (BindingIndex bindingIndex = 0;
+ for (BindingIndex bindingIndex{0};
bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
const BindingInfo& bindingInfo =
group->GetLayout()->GetBindingInfo(bindingIndex);
@@ -312,9 +313,45 @@ namespace dawn_native { namespace opengl {
break;
}
- case wgpu::BindingType::StorageTexture:
case wgpu::BindingType::ReadonlyStorageTexture:
- case wgpu::BindingType::WriteonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ TextureView* view =
+ ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ Texture* texture = ToBackend(view->GetTexture());
+ GLuint handle = texture->GetHandle();
+ GLuint imageIndex = indices[bindingIndex];
+
+ GLenum access;
+ switch (bindingInfo.type) {
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ access = GL_READ_ONLY;
+ break;
+ case wgpu::BindingType::WriteonlyStorageTexture:
+ access = GL_WRITE_ONLY;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+                            // OpenGL ES only supports binding either a single layer or the
+                            // entire texture in glBindImageTexture().
+ GLboolean isLayered;
+ if (view->GetLayerCount() == 1) {
+ isLayered = GL_FALSE;
+ } else if (texture->GetArrayLayers() == view->GetLayerCount()) {
+ isLayered = GL_TRUE;
+ } else {
+ UNREACHABLE();
+ }
+
+ gl.BindImageTexture(imageIndex, handle, view->GetBaseMipLevel(),
+ isLayered, view->GetBaseArrayLayer(), access,
+ texture->GetGLFormat().internalFormat);
+ break;
+ }
+
+ case wgpu::BindingType::StorageTexture:
UNREACHABLE();
break;
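
Note: glBindImageTexture is core in GL 4.2 / ES 3.1, and when layered is GL_TRUE the
layer argument is ignored and the whole texture is bound. A hedged sketch of the call
with placeholder values (textureHandle, view, and the format are assumptions for
illustration):

    // Assumes a current context with image load/store support.
    glBindImageTexture(/*unit=*/0,
                       /*texture=*/textureHandle,
                       /*level=*/view.baseMipLevel,
                       /*layered=*/GL_TRUE,  // GL_FALSE binds only 'layer'
                       /*layer=*/0,          // ignored when layered == GL_TRUE
                       /*access=*/GL_WRITE_ONLY,
                       /*format=*/GL_RGBA8);
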
@@ -422,8 +459,7 @@ namespace dawn_native { namespace opengl {
// cleared in BeginRenderPass by setting the loadop to clear when the
// texture subresource has not been initialized before the render pass.
if (!(usages.textureUsages[i].usage & wgpu::TextureUsage::OutputAttachment)) {
- texture->EnsureSubresourceContentInitialized(0, texture->GetNumMipLevels(), 0,
- texture->GetArrayLayers());
+ texture->EnsureSubresourceContentInitialized(texture->GetAllSubresources());
}
}
};
@@ -477,12 +513,17 @@ namespace dawn_native { namespace opengl {
Texture* texture = ToBackend(dst.texture.Get());
GLenum target = texture->GetGLTarget();
const GLFormat& format = texture->GetGLFormat();
+
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+                        // TODO(jiawei.shao@intel.com): use copy->destination.origin.z instead of
+                        // copy->destination.arrayLayer once the migration from
+                        // GPUTextureCopyView.arrayLayer to GPUTextureCopyView.origin.z is done.
+ SubresourceRange subresources = {dst.mipLevel, 1, dst.arrayLayer,
+ copy->copySize.depth};
if (IsCompleteSubresourceCopiedTo(texture, copySize, dst.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, dst.mipLevel, 1,
- dst.arrayLayer, 1);
+ texture->SetIsSubresourceContentInitialized(true, subresources);
} else {
- texture->EnsureSubresourceContentInitialized(dst.mipLevel, 1,
- dst.arrayLayer, 1);
+ texture->EnsureSubresourceContentInitialized(subresources);
}
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
@@ -510,8 +551,8 @@ namespace dawn_native { namespace opengl {
if (texture->GetArrayLayers() > 1) {
gl.CompressedTexSubImage3D(
target, dst.mipLevel, dst.origin.x, dst.origin.y, dst.arrayLayer,
- copyExtent.width, copyExtent.height, 1, format.internalFormat,
- copyDataSize,
+ copyExtent.width, copyExtent.height, copyExtent.depth,
+ format.internalFormat, copyDataSize,
reinterpret_cast<void*>(static_cast<uintptr_t>(src.offset)));
} else {
gl.CompressedTexSubImage2D(
@@ -525,7 +566,8 @@ namespace dawn_native { namespace opengl {
if (texture->GetArrayLayers() > 1) {
gl.TexSubImage3D(target, dst.mipLevel, dst.origin.x,
dst.origin.y, dst.arrayLayer, copySize.width,
- copySize.height, 1, format.format, format.type,
+ copySize.height, copySize.depth, format.format,
+ format.type,
reinterpret_cast<void*>(
static_cast<uintptr_t>(src.offset)));
} else {
@@ -556,17 +598,20 @@ namespace dawn_native { namespace opengl {
auto& copySize = copy->copySize;
Texture* texture = ToBackend(src.texture.Get());
Buffer* buffer = ToBackend(dst.buffer.Get());
- const GLFormat& format = texture->GetGLFormat();
+ const Format& format = texture->GetFormat();
+ const GLFormat& glFormat = texture->GetGLFormat();
GLenum target = texture->GetGLTarget();
// TODO(jiawei.shao@intel.com): support texture-to-buffer copy with compressed
// texture formats.
- if (texture->GetFormat().isCompressed) {
+ if (format.isCompressed) {
UNREACHABLE();
}
- texture->EnsureSubresourceContentInitialized(src.mipLevel, 1, src.arrayLayer,
- 1);
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ SubresourceRange subresources = {src.mipLevel, 1, src.arrayLayer,
+ copy->copySize.depth};
+ texture->EnsureSubresourceContentInitialized(subresources);
// The only way to move data from a texture to a buffer in GL is via
// glReadPixels with a pack buffer. Create a temporary FBO for the copy.
gl.BindTexture(target, texture->GetHandle());
@@ -574,31 +619,62 @@ namespace dawn_native { namespace opengl {
GLuint readFBO = 0;
gl.GenFramebuffers(1, &readFBO);
gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
+
+ GLenum glAttachment = 0;
+ switch (format.aspect) {
+ case Format::Aspect::Color:
+ glAttachment = GL_COLOR_ATTACHMENT0;
+ break;
+ case Format::Aspect::Depth:
+ glAttachment = GL_DEPTH_ATTACHMENT;
+ break;
+ case Format::Aspect::Stencil:
+ glAttachment = GL_STENCIL_ATTACHMENT;
+ break;
+ case Format::Aspect::DepthStencil:
+ glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
+ gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / format.blockByteSize);
+ gl.PixelStorei(GL_PACK_IMAGE_HEIGHT, dst.rowsPerImage);
+
+ uint8_t* offset =
+ reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(dst.offset));
switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e2D:
- if (texture->GetArrayLayers() > 1) {
- gl.FramebufferTextureLayer(
- GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture->GetHandle(),
- src.mipLevel, src.arrayLayer);
- } else {
- gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- GL_TEXTURE_2D, texture->GetHandle(),
- src.mipLevel);
+ case wgpu::TextureDimension::e2D: {
+ if (texture->GetArrayLayers() == 1) {
+ gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, target,
+ texture->GetHandle(), src.mipLevel);
+ gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+ copySize.height, glFormat.format, glFormat.type,
+ offset);
+ break;
+ }
+
+ const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
+ for (uint32_t layer = 0; layer < copySize.depth; ++layer) {
+ gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
+ texture->GetHandle(), src.mipLevel,
+ src.arrayLayer + layer);
+ gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+ copySize.height, glFormat.format, glFormat.type,
+ offset);
+
+ offset += bytesPerImage;
}
+
break;
+ }
default:
UNREACHABLE();
}
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
- gl.PixelStorei(GL_PACK_ROW_LENGTH,
- dst.bytesPerRow / texture->GetFormat().blockByteSize);
- gl.PixelStorei(GL_PACK_IMAGE_HEIGHT, dst.rowsPerImage);
- ASSERT(copySize.depth == 1 && src.origin.z == 0);
- void* offset = reinterpret_cast<void*>(static_cast<uintptr_t>(dst.offset));
- gl.ReadPixels(src.origin.x, src.origin.y, copySize.width, copySize.height,
- format.format, format.type, offset);
gl.PixelStorei(GL_PACK_ROW_LENGTH, 0);
gl.PixelStorei(GL_PACK_IMAGE_HEIGHT, 0);
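
Note: GL has no direct texture-to-buffer copy, hence the FBO plus glReadPixels dance
above. With a pixel pack buffer bound, glReadPixels writes into the buffer at the given
offset, and GL_PACK_ROW_LENGTH (in pixels) plus GL_PACK_IMAGE_HEIGHT reproduce Dawn's
bytesPerRow/rowsPerImage layout. A per-layer sketch with placeholder names
(textureHandle, bytesPerTexel, and the x/y/width/height values are assumptions):

    // Assumes a bound GL_PIXEL_PACK_BUFFER and a complete read framebuffer.
    glPixelStorei(GL_PACK_ROW_LENGTH, bytesPerRow / bytesPerTexel);  // in pixels
    glPixelStorei(GL_PACK_IMAGE_HEIGHT, rowsPerImage);
    uint64_t offset = dstOffset;
    for (uint32_t layer = 0; layer < layerCount; ++layer) {
        glFramebufferTextureLayer(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                  textureHandle, mipLevel, baseLayer + layer);
        glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE,
                     reinterpret_cast<void*>(static_cast<uintptr_t>(offset)));
        offset += uint64_t(bytesPerRow) * rowsPerImage;  // bytesPerImage stride
    }
    glPixelStorei(GL_PACK_ROW_LENGTH, 0);  // restore pack state
    glPixelStorei(GL_PACK_IMAGE_HEIGHT, 0);
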
@@ -620,20 +696,22 @@ namespace dawn_native { namespace opengl {
Extent3D copySize = ComputeTextureCopyExtent(dst, copy->copySize);
Texture* srcTexture = ToBackend(src.texture.Get());
Texture* dstTexture = ToBackend(dst.texture.Get());
- srcTexture->EnsureSubresourceContentInitialized(src.mipLevel, 1, src.arrayLayer,
- 1);
+ SubresourceRange srcRange = {src.mipLevel, 1, src.arrayLayer,
+ copy->copySize.depth};
+ SubresourceRange dstRange = {dst.mipLevel, 1, dst.arrayLayer,
+ copy->copySize.depth};
+
+ srcTexture->EnsureSubresourceContentInitialized(srcRange);
if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
- dstTexture->SetIsSubresourceContentInitialized(true, dst.mipLevel, 1,
- dst.arrayLayer, 1);
+ dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
} else {
- dstTexture->EnsureSubresourceContentInitialized(dst.mipLevel, 1,
- dst.arrayLayer, 1);
+ dstTexture->EnsureSubresourceContentInitialized(dstRange);
}
gl.CopyImageSubData(srcTexture->GetHandle(), srcTexture->GetGLTarget(),
src.mipLevel, src.origin.x, src.origin.y, src.arrayLayer,
dstTexture->GetHandle(), dstTexture->GetGLTarget(),
dst.mipLevel, dst.origin.x, dst.origin.y, dst.arrayLayer,
- copySize.width, copySize.height, 1);
+ copySize.width, copySize.height, copy->copySize.depth);
break;
}
@@ -790,8 +868,15 @@ namespace dawn_native { namespace opengl {
break;
}
- GLenum target = ToBackend(textureView->GetTexture())->GetGLTarget();
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, texture, 0);
+ if (textureView->GetTexture()->GetArrayLayers() == 1) {
+ GLenum target = ToBackend(textureView->GetTexture())->GetGLTarget();
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, texture,
+ textureView->GetBaseMipLevel());
+ } else {
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, texture,
+ textureView->GetBaseMipLevel(),
+ textureView->GetBaseArrayLayer());
+ }
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
index 45081762625..f47474f2aef 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
@@ -24,6 +24,7 @@
#include "dawn_native/opengl/CommandBufferGL.h"
#include "dawn_native/opengl/ComputePipelineGL.h"
#include "dawn_native/opengl/PipelineLayoutGL.h"
+#include "dawn_native/opengl/QuerySetGL.h"
#include "dawn_native/opengl/QueueGL.h"
#include "dawn_native/opengl/RenderPipelineGL.h"
#include "dawn_native/opengl/SamplerGL.h"
@@ -94,6 +95,7 @@ namespace dawn_native { namespace opengl {
ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
+ DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
return BindGroup::Create(this, descriptor);
}
ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
@@ -115,6 +117,9 @@ namespace dawn_native { namespace opengl {
const PipelineLayoutDescriptor* descriptor) {
return new PipelineLayout(this, descriptor);
}
+ ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return new QuerySet(this, descriptor);
+ }
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
return new RenderPipeline(this, descriptor);
@@ -152,7 +157,6 @@ namespace dawn_native { namespace opengl {
}
MaybeError Device::TickImpl() {
- CheckPassedSerials();
return {};
}
@@ -194,21 +198,13 @@ namespace dawn_native { namespace opengl {
void Device::ShutDownImpl() {
ASSERT(GetState() == State::Disconnected);
-
- // Some operations might have been started since the last submit and waiting
- // on a serial that doesn't have a corresponding fence enqueued. Force all
- // operations to look as if they were completed (because they were).
- AssumeCommandsComplete();
}
MaybeError Device::WaitForIdleForDestruction() {
gl.Finish();
CheckPassedSerials();
ASSERT(mFencesInFlight.empty());
- Tick();
- // Force all operations to look as if they were completed
- AssumeCommandsComplete();
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
index c38fcf67f94..4a03f4aeaac 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
@@ -19,6 +19,7 @@
#include "common/Platform.h"
#include "dawn_native/Device.h"
+#include "dawn_native/QuerySet.h"
#include "dawn_native/opengl/Forward.h"
#include "dawn_native/opengl/GLFormat.h"
#include "dawn_native/opengl/OpenGLFunctions.h"
@@ -75,6 +76,8 @@ namespace dawn_native { namespace opengl {
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h b/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h
index bd2cc76ca7a..82d07661ae8 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h
@@ -28,6 +28,7 @@ namespace dawn_native { namespace opengl {
class Device;
class PersistentPipelineState;
class PipelineLayout;
+ class QuerySet;
class Queue;
class RenderPipeline;
class Sampler;
@@ -45,6 +46,7 @@ namespace dawn_native { namespace opengl {
using ComputePipelineType = ComputePipeline;
using DeviceType = Device;
using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
using QueueType = Queue;
using RenderPipelineType = RenderPipeline;
using SamplerType = Sampler;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
index 926efc5f050..76c24b6b407 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
@@ -106,7 +106,7 @@ namespace dawn_native { namespace opengl {
// etc.
const auto& indices = layout->GetBindingIndexInfo();
- for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(group);
for (const auto& it : bgl->GetBindingMap()) {
@@ -142,9 +142,16 @@ namespace dawn_native { namespace opengl {
// emulation
break;
- case wgpu::BindingType::StorageTexture:
case wgpu::BindingType::ReadonlyStorageTexture:
- case wgpu::BindingType::WriteonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ GLint location = gl.GetUniformLocation(mProgram, name.c_str());
+ if (location != -1) {
+ gl.Uniform1i(location, indices[group][bindingIndex]);
+ }
+ break;
+ }
+
+ case wgpu::BindingType::StorageTexture:
UNREACHABLE();
break;
@@ -176,20 +183,29 @@ namespace dawn_native { namespace opengl {
gl.Uniform1i(location, textureUnit);
- GLuint textureIndex =
- indices[combined.textureLocation.group][combined.textureLocation.binding];
- mUnitsForTextures[textureIndex].push_back(textureUnit);
+ bool shouldUseFiltering;
+ {
+ const BindGroupLayoutBase* bgl =
+ layout->GetBindGroupLayout(combined.textureLocation.group);
+ BindingIndex bindingIndex =
+ bgl->GetBindingIndex(combined.textureLocation.binding);
- const BindGroupLayoutBase* bgl =
- layout->GetBindGroupLayout(combined.textureLocation.group);
- Format::Type componentType =
- bgl->GetBindingInfo(bgl->GetBindingIndex(combined.textureLocation.binding))
- .textureComponentType;
- bool shouldUseFiltering = componentType == Format::Type::Float;
+ GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
+ mUnitsForTextures[textureIndex].push_back(textureUnit);
- GLuint samplerIndex =
- indices[combined.samplerLocation.group][combined.samplerLocation.binding];
- mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
+ Format::Type componentType =
+ bgl->GetBindingInfo(bindingIndex).textureComponentType;
+ shouldUseFiltering = componentType == Format::Type::Float;
+ }
+ {
+ const BindGroupLayoutBase* bgl =
+ layout->GetBindGroupLayout(combined.samplerLocation.group);
+ BindingIndex bindingIndex =
+ bgl->GetBindingIndex(combined.samplerLocation.binding);
+
+ GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
+ mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
+ }
textureUnit++;
}
@@ -203,7 +219,7 @@ namespace dawn_native { namespace opengl {
}
const std::vector<GLuint>& PipelineGL::GetTextureUnitsForTextureView(GLuint index) const {
- ASSERT(index < mUnitsForSamplers.size());
+ ASSERT(index < mUnitsForTextures.size());
return mUnitsForTextures[index];
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
index 6a081c119e5..7f681f94b8a 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
@@ -36,8 +36,9 @@ namespace dawn_native { namespace opengl {
const PipelineLayout* layout,
const PerStage<const ShaderModule*>& modules);
- using BindingLocations =
- std::array<std::array<GLint, kMaxBindingsPerGroup>, kMaxBindGroups>;
+ using BindingLocations = ityp::array<BindGroupIndex,
+ ityp::array<BindingIndex, GLint, kMaxBindingsPerGroup>,
+ kMaxBindGroups>;
// For each unit a sampler is bound to we need to know if we should use filtering or not
// because int and uint texture are only complete without filtering.
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
index 082a25bc4f2..0e98c617610 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
@@ -26,11 +26,12 @@ namespace dawn_native { namespace opengl {
GLuint samplerIndex = 0;
GLuint sampledTextureIndex = 0;
GLuint ssboIndex = 0;
+ GLuint storageTextureIndex = 0;
- for (uint32_t group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
- for (BindingIndex bindingIndex = 0; bindingIndex < bgl->GetBindingCount();
+ for (BindingIndex bindingIndex{0}; bindingIndex < bgl->GetBindingCount();
++bindingIndex) {
switch (bgl->GetBindingInfo(bindingIndex).type) {
case wgpu::BindingType::UniformBuffer:
@@ -53,9 +54,13 @@ namespace dawn_native { namespace opengl {
ssboIndex++;
break;
- case wgpu::BindingType::StorageTexture:
case wgpu::BindingType::ReadonlyStorageTexture:
case wgpu::BindingType::WriteonlyStorageTexture:
+ mIndexInfo[group][bindingIndex] = storageTextureIndex;
+ storageTextureIndex++;
+ break;
+
+ case wgpu::BindingType::StorageTexture:
UNREACHABLE();
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h
index fb03aaf7cec..3d511d6d513 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h
@@ -17,6 +17,8 @@
#include "dawn_native/PipelineLayout.h"
+#include "common/ityp_array.h"
+#include "dawn_native/BindingInfo.h"
#include "dawn_native/opengl/opengl_platform.h"
namespace dawn_native { namespace opengl {
@@ -28,7 +30,9 @@ namespace dawn_native { namespace opengl {
PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
using BindingIndexInfo =
- std::array<std::array<GLuint, kMaxBindingsPerGroup>, kMaxBindGroups>;
+ ityp::array<BindGroupIndex,
+ ityp::array<BindingIndex, GLuint, kMaxBindingsPerGroup>,
+ kMaxBindGroups>;
const BindingIndexInfo& GetBindingIndexInfo() const;
GLuint GetTextureUnitsUsed() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.cpp
new file mode 100644
index 00000000000..6ff5d20603f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.cpp
@@ -0,0 +1,32 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/opengl/QuerySetGL.h"
+
+#include "dawn_native/opengl/DeviceGL.h"
+
+namespace dawn_native { namespace opengl {
+
+ QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+ : QuerySetBase(device, descriptor) {
+ }
+
+ QuerySet::~QuerySet() {
+ DestroyInternal();
+ }
+
+ void QuerySet::DestroyImpl() {
+ }
+
+}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.h
new file mode 100644
index 00000000000..2a83bdd0468
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.h
@@ -0,0 +1,36 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_QUERYSETGL_H_
+#define DAWNNATIVE_OPENGL_QUERYSETGL_H_
+
+#include "dawn_native/QuerySet.h"
+
+namespace dawn_native { namespace opengl {
+
+ class Device;
+
+ class QuerySet final : public QuerySetBase {
+ public:
+ QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+
+ private:
+ ~QuerySet() override;
+
+ void DestroyImpl() override;
+ };
+
+}} // namespace dawn_native::opengl
+
+#endif // DAWNNATIVE_OPENGL_QUERYSETGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
index 9e08f6ae458..a33cbd0da5f 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/opengl/QueueGL.h"
+#include "dawn_native/opengl/BufferGL.h"
#include "dawn_native/opengl/CommandBufferGL.h"
#include "dawn_native/opengl/DeviceGL.h"
#include "dawn_platform/DawnPlatform.h"
@@ -37,4 +38,15 @@ namespace dawn_native { namespace opengl {
return {};
}
+ MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
+ gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
+ return {};
+ }
+
}} // namespace dawn_native::opengl
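
The Queue::WriteBufferImpl added above is a thin wrapper over glBufferSubData. A standalone sketch with raw GL calls, assuming a current GL context and a valid buffer object (the function name here is illustrative, not Dawn API):

    // Upload `size` bytes from `data` into `buffer` at `bufferOffset`.
    void WriteBufferRaw(GLuint buffer, uint64_t bufferOffset, const void* data, size_t size) {
        glBindBuffer(GL_ARRAY_BUFFER, buffer);
        glBufferSubData(GL_ARRAY_BUFFER, static_cast<GLintptr>(bufferOffset),
                        static_cast<GLsizeiptr>(size), data);
    }

The binding target is arbitrary; GL_ARRAY_BUFFER serves only as a bind point for the upload, matching the implementation above.
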
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
index d62e90d09a4..301b1ad7007 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
@@ -28,6 +28,10 @@ namespace dawn_native { namespace opengl {
private:
MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) override;
};
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
index 89797167516..53aa101ad1b 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
@@ -24,9 +24,10 @@
namespace dawn_native { namespace opengl {
- std::string GetBindingName(uint32_t group, uint32_t binding) {
+ std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber) {
std::ostringstream o;
- o << "dawn_binding_" << group << "_" << binding;
+ o << "dawn_binding_" << static_cast<uint32_t>(group) << "_"
+ << static_cast<uint32_t>(bindingNumber);
return o.str();
}
@@ -42,8 +43,10 @@ namespace dawn_native { namespace opengl {
std::string CombinedSampler::GetName() const {
std::ostringstream o;
o << "dawn_combined";
- o << "_" << samplerLocation.group << "_" << samplerLocation.binding;
- o << "_with_" << textureLocation.group << "_" << textureLocation.binding;
+ o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
+ << static_cast<uint32_t>(samplerLocation.binding);
+ o << "_with_" << static_cast<uint32_t>(textureLocation.group) << "_"
+ << static_cast<uint32_t>(textureLocation.binding);
return o.str();
}
@@ -68,6 +71,7 @@ namespace dawn_native { namespace opengl {
}
MaybeError ShaderModule::Initialize() {
+ DAWN_TRY(InitializeBase());
const std::vector<uint32_t>& spirv = GetSpirv();
std::unique_ptr<spirv_cross::CompilerGLSL> compilerImpl;
@@ -139,15 +143,26 @@ namespace dawn_native { namespace opengl {
mCombinedInfo.emplace_back();
auto& info = mCombinedInfo.back();
+ uint32_t samplerGroup;
mSpvcContext.GetDecoration(sampler.sampler_id,
- shaderc_spvc_decoration_descriptorset,
- &info.samplerLocation.group);
+ shaderc_spvc_decoration_descriptorset, &samplerGroup);
+ info.samplerLocation.group = BindGroupIndex(samplerGroup);
+
+ uint32_t samplerBinding;
mSpvcContext.GetDecoration(sampler.sampler_id, shaderc_spvc_decoration_binding,
- &info.samplerLocation.binding);
+ &samplerBinding);
+ info.samplerLocation.binding = BindingNumber(samplerBinding);
+
+ uint32_t textureGroup;
mSpvcContext.GetDecoration(sampler.image_id, shaderc_spvc_decoration_descriptorset,
- &info.textureLocation.group);
+ &textureGroup);
+ info.textureLocation.group = BindGroupIndex(textureGroup);
+
+ uint32_t textureBinding;
mSpvcContext.GetDecoration(sampler.image_id, shaderc_spvc_decoration_binding,
- &info.textureLocation.binding);
+ &textureBinding);
+ info.textureLocation.binding = BindingNumber(textureBinding);
+
mSpvcContext.SetName(sampler.combined_id, info.GetName());
}
} else {
@@ -155,14 +170,14 @@ namespace dawn_native { namespace opengl {
mCombinedInfo.emplace_back();
auto& info = mCombinedInfo.back();
- info.samplerLocation.group =
- compiler->get_decoration(combined.sampler_id, spv::DecorationDescriptorSet);
- info.samplerLocation.binding =
- compiler->get_decoration(combined.sampler_id, spv::DecorationBinding);
- info.textureLocation.group =
- compiler->get_decoration(combined.image_id, spv::DecorationDescriptorSet);
- info.textureLocation.binding =
- compiler->get_decoration(combined.image_id, spv::DecorationBinding);
+ info.samplerLocation.group = BindGroupIndex(
+ compiler->get_decoration(combined.sampler_id, spv::DecorationDescriptorSet));
+ info.samplerLocation.binding = BindingNumber(
+ compiler->get_decoration(combined.sampler_id, spv::DecorationBinding));
+ info.textureLocation.group = BindGroupIndex(
+ compiler->get_decoration(combined.image_id, spv::DecorationDescriptorSet));
+ info.textureLocation.binding = BindingNumber(
+ compiler->get_decoration(combined.image_id, spv::DecorationBinding));
compiler->set_name(combined.combined_id, info.GetName());
}
}
@@ -170,17 +185,31 @@ namespace dawn_native { namespace opengl {
// Change binding names to be "dawn_binding_<group>_<binding>".
// Also unsets the SPIRV "Binding" decoration as it outputs "layout(binding=)" which
// isn't supported on OSX's OpenGL.
- for (uint32_t group = 0; group < kMaxBindGroups; ++group) {
+ for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
for (const auto& it : bindingInfo[group]) {
BindingNumber bindingNumber = it.first;
const auto& info = it.second;
+ uint32_t resourceId;
+ switch (info.type) {
+ // When the resource is a uniform or shader storage block, we should change the
+ // block name instead of the instance name.
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::UniformBuffer:
+ resourceId = info.base_type_id;
+ break;
+ default:
+ resourceId = info.id;
+ break;
+ }
+
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
- mSpvcContext.SetName(info.base_type_id, GetBindingName(group, bindingNumber));
+ mSpvcContext.SetName(resourceId, GetBindingName(group, bindingNumber));
mSpvcContext.UnsetDecoration(info.id, shaderc_spvc_decoration_binding);
mSpvcContext.UnsetDecoration(info.id, shaderc_spvc_decoration_descriptorset);
} else {
- compiler->set_name(info.base_type_id, GetBindingName(group, bindingNumber));
+ compiler->set_name(resourceId, GetBindingName(group, bindingNumber));
compiler->unset_decoration(info.id, spv::DecorationBinding);
compiler->unset_decoration(info.id, spv::DecorationDescriptorSet);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
index 9e2b5c9e146..3ddd19a7f18 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
@@ -23,11 +23,11 @@ namespace dawn_native { namespace opengl {
class Device;
- std::string GetBindingName(uint32_t group, uint32_t binding);
+ std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber);
struct BindingLocation {
- uint32_t group;
- uint32_t binding;
+ BindGroupIndex group;
+ BindingNumber binding;
};
bool operator<(const BindingLocation& a, const BindingLocation& b);
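
GetBindingName produces the same "dawn_binding_<group>_<binding>" strings after the switch to typed indices; only the casts back to uint32_t are new. For example (values hypothetical):

    BindGroupIndex group(0);
    BindingNumber binding(3);
    std::string name = GetBindingName(group, binding);  // "dawn_binding_0_3"
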
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
index 40efd0a27df..8223a2ceb12 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
@@ -44,7 +44,7 @@ namespace dawn_native { namespace opengl {
TextureBase::TextureState::OwnedExternal);
}
- MaybeError SwapChain::OnBeforePresent(TextureBase*) {
+ MaybeError SwapChain::OnBeforePresent(TextureViewBase*) {
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
index a483e70c6b0..0cce92594ac 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
@@ -30,7 +30,7 @@ namespace dawn_native { namespace opengl {
protected:
~SwapChain() override;
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureBase* texture) override;
+ MaybeError OnBeforePresent(TextureViewBase* view) override;
};
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
index 9f2c163020a..ee64bcda572 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
@@ -28,7 +28,7 @@ namespace dawn_native { namespace opengl {
GLenum TargetForTexture(const TextureDescriptor* descriptor) {
switch (descriptor->dimension) {
case wgpu::TextureDimension::e2D:
- if (descriptor->arrayLayerCount > 1) {
+ if (descriptor->size.depth > 1) {
ASSERT(descriptor->sampleCount == 1);
return GL_TEXTURE_2D_ARRAY;
} else {
@@ -46,11 +46,15 @@ namespace dawn_native { namespace opengl {
}
GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
+ uint32_t arrayLayerCount,
uint32_t sampleCount) {
switch (dimension) {
case wgpu::TextureViewDimension::e2D:
return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
case wgpu::TextureViewDimension::e2DArray:
+ if (arrayLayerCount == 1) {
+ return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
+ }
ASSERT(sampleCount == 1);
return GL_TEXTURE_2D_ARRAY;
case wgpu::TextureViewDimension::Cube:
@@ -108,8 +112,8 @@ namespace dawn_native { namespace opengl {
: Texture(device, descriptor, GenTexture(device->gl), TextureState::OwnedInternal) {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- uint32_t width = GetSize().width;
- uint32_t height = GetSize().height;
+ uint32_t width = GetWidth();
+ uint32_t height = GetHeight();
uint32_t levels = GetNumMipLevels();
uint32_t arrayLayers = GetArrayLayers();
uint32_t sampleCount = GetSampleCount();
@@ -145,8 +149,8 @@ namespace dawn_native { namespace opengl {
gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- GetDevice()->ConsumedError(ClearTexture(0, GetNumMipLevels(), 0, GetArrayLayers(),
- TextureBase::ClearValue::NonZero));
+ GetDevice()->ConsumedError(
+ ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
}
}
@@ -181,10 +185,7 @@ namespace dawn_native { namespace opengl {
return ToBackend(GetDevice())->GetGLFormat(GetFormat());
}
- MaybeError Texture::ClearTexture(GLint baseMipLevel,
- GLint levelCount,
- GLint baseArrayLayer,
- GLint layerCount,
+ MaybeError Texture::ClearTexture(const SubresourceRange& range,
TextureBase::ClearValue clearValue) {
// TODO(jiawei.shao@intel.com): initialize the textures with compressed formats.
if (GetFormat().isCompressed) {
@@ -224,31 +225,35 @@ namespace dawn_native { namespace opengl {
gl.GenFramebuffers(1, &framebuffer);
gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
- for (GLint level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
switch (GetDimension()) {
case wgpu::TextureDimension::e2D:
if (GetArrayLayers() == 1) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, 0, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, 0))) {
// Skip lazy clears if already initialized.
continue;
}
gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER,
GL_DEPTH_STENCIL_ATTACHMENT, GetGLTarget(),
- GetHandle(), level);
+ GetHandle(), static_cast<GLint>(level));
DoClear();
} else {
- for (GLint layer = baseArrayLayer;
- layer < baseArrayLayer + layerCount; ++layer) {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, layer))) {
// Skip lazy clears if already initialized.
continue;
}
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER,
- GL_DEPTH_STENCIL_ATTACHMENT,
- GetHandle(), level, layer);
+ gl.FramebufferTextureLayer(
+ GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
+ GetHandle(), static_cast<GLint>(level),
+ static_cast<GLint>(layer));
DoClear();
}
}
@@ -268,16 +273,19 @@ namespace dawn_native { namespace opengl {
clearColorData.fill(clearColor);
const GLFormat& glFormat = GetGLFormat();
- for (GLint level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
Extent3D mipSize = GetMipLevelPhysicalSize(level);
- for (GLint layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
- ++layer) {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, layer))) {
// Skip lazy clears if already initialized.
continue;
}
- gl.ClearTexSubImage(mHandle, level, 0, 0, layer, mipSize.width,
+ gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
+ static_cast<GLint>(layer), mipSize.width,
mipSize.height, 1, glFormat.format, glFormat.type,
clearColorData.data());
}
@@ -288,15 +296,15 @@ namespace dawn_native { namespace opengl {
// create temp buffer with clear color to copy to the texture image
ASSERT(kTextureBytesPerRowAlignment % GetFormat().blockByteSize == 0);
uint32_t bytesPerRow =
- Align((GetSize().width / GetFormat().blockWidth) * GetFormat().blockByteSize,
+ Align((GetWidth() / GetFormat().blockWidth) * GetFormat().blockByteSize,
kTextureBytesPerRowAlignment);
// Make sure that we are not rounding
ASSERT(bytesPerRow % GetFormat().blockByteSize == 0);
- ASSERT(GetSize().height % GetFormat().blockHeight == 0);
+ ASSERT(GetHeight() % GetFormat().blockHeight == 0);
dawn_native::BufferDescriptor descriptor;
- descriptor.size = bytesPerRow * (GetSize().height / GetFormat().blockHeight);
+ descriptor.size = bytesPerRow * (GetHeight() / GetFormat().blockHeight);
if (descriptor.size > std::numeric_limits<uint32_t>::max()) {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
}
@@ -319,7 +327,8 @@ namespace dawn_native { namespace opengl {
gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
(bytesPerRow / GetFormat().blockByteSize) * GetFormat().blockWidth);
gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
- for (GLint level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
gl.ActiveTexture(GL_TEXTURE0);
gl.BindTexture(GetGLTarget(), GetHandle());
@@ -329,23 +338,26 @@ namespace dawn_native { namespace opengl {
case wgpu::TextureDimension::e2D:
if (GetArrayLayers() == 1) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, 0, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, 0))) {
// Skip lazy clears if already initialized.
continue;
}
- gl.TexSubImage2D(GetGLTarget(), level, 0, 0, size.width, size.height,
- GetGLFormat().format, GetGLFormat().type, 0);
+ gl.TexSubImage2D(GetGLTarget(), static_cast<GLint>(level), 0, 0,
+ size.width, size.height, GetGLFormat().format,
+ GetGLFormat().type, 0);
} else {
- for (GLint layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
- ++layer) {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, layer))) {
// Skip lazy clears if already initialized.
continue;
}
- gl.TexSubImage3D(GetGLTarget(), level, 0, 0, layer, size.width,
- size.height, 1, GetGLFormat().format,
- GetGLFormat().type, 0);
+ gl.TexSubImage3D(GetGLTarget(), static_cast<GLint>(level), 0, 0,
+ static_cast<GLint>(layer), size.width, size.height,
+ 1, GetGLFormat().format, GetGLFormat().type, 0);
}
}
break;
@@ -360,24 +372,18 @@ namespace dawn_native { namespace opengl {
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
- layerCount);
+ SetIsSubresourceContentInitialized(true, range);
device->IncrementLazyClearCountForTesting();
}
return {};
}
- void Texture::EnsureSubresourceContentInitialized(uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount) {
+ void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
return;
}
- if (!IsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
- layerCount)) {
- GetDevice()->ConsumedError(ClearTexture(baseMipLevel, levelCount, baseArrayLayer,
- layerCount, TextureBase::ClearValue::Zero));
+ if (!IsSubresourceContentInitialized(range)) {
+ GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
}
}
@@ -385,7 +391,8 @@ namespace dawn_native { namespace opengl {
TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
: TextureViewBase(texture, descriptor), mOwnsHandle(false) {
- mTarget = TargetForTextureViewDimension(descriptor->dimension, texture->GetSampleCount());
+ mTarget = TargetForTextureViewDimension(descriptor->dimension, descriptor->arrayLayerCount,
+ texture->GetSampleCount());
if (!UsageNeedsTextureView(texture->GetUsage())) {
mHandle = 0;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
index b48cda335d4..a01b94348bc 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
@@ -36,20 +36,13 @@ namespace dawn_native { namespace opengl {
GLenum GetGLTarget() const;
const GLFormat& GetGLFormat() const;
- void EnsureSubresourceContentInitialized(uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount);
+ void EnsureSubresourceContentInitialized(const SubresourceRange& range);
private:
~Texture() override;
void DestroyImpl() override;
- MaybeError ClearTexture(GLint baseMipLevel,
- GLint levelCount,
- GLint baseArrayLayer,
- GLint layerCount,
- TextureBase::ClearValue clearValue);
+ MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
GLuint mHandle;
GLenum mTarget;
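
The four loose mip/layer parameters are folded into a SubresourceRange. Judging from the call sites in this diff (aggregate initialization and SingleSubresource), its shape is roughly the following sketch; the authoritative definition lives elsewhere in the CL:

    struct SubresourceRange {
        uint32_t baseMipLevel;
        uint32_t levelCount;
        uint32_t baseArrayLayer;
        uint32_t layerCount;

        // Convenience for one mip level of one array layer.
        static SubresourceRange SingleSubresource(uint32_t level, uint32_t layer) {
            return {level, 1, layer, 1};
        }
    };
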
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
index 9cbe82d010d..43ed6acbc2c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
@@ -39,7 +39,7 @@ namespace dawn_native { namespace vulkan {
MaybeError Adapter::Initialize() {
DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
- if (!mDeviceInfo.maintenance1) {
+ if (!mDeviceInfo.HasExt(DeviceExt::Maintenance1)) {
return DAWN_INTERNAL_ERROR(
"Dawn requires Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 in order to support "
"viewport flipY");
@@ -73,6 +73,21 @@ namespace dawn_native { namespace vulkan {
if (mDeviceInfo.features.textureCompressionBC == VK_TRUE) {
mSupportedExtensions.EnableExtension(Extension::TextureCompressionBC);
}
+
+ if (mDeviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
+ mDeviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
+ mDeviceInfo.HasExt(DeviceExt::_16BitStorage) &&
+ mDeviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE) {
+ mSupportedExtensions.EnableExtension(Extension::ShaderFloat16);
+ }
+
+ if (mDeviceInfo.features.pipelineStatisticsQuery == VK_TRUE) {
+ mSupportedExtensions.EnableExtension(Extension::PipelineStatisticsQuery);
+ }
+
+ if (mDeviceInfo.properties.limits.timestampComputeAndGraphics == VK_TRUE) {
+ mSupportedExtensions.EnableExtension(Extension::TimestampQuery);
+ }
}
ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
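
The new extension gates read straight off the queried device info. In raw Vulkan the equivalent queries look like this sketch (Dawn caches the results in mDeviceInfo rather than re-querying):

    VkPhysicalDeviceFeatures features = {};
    vkGetPhysicalDeviceFeatures(physicalDevice, &features);

    VkPhysicalDeviceProperties properties = {};
    vkGetPhysicalDeviceProperties(physicalDevice, &properties);

    bool hasPipelineStatistics = features.pipelineStatisticsQuery == VK_TRUE;
    bool hasTimestamps = properties.limits.timestampComputeAndGraphics == VK_TRUE;
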
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
index eef62e8d394..0a68231e7b2 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/vulkan/BackendVk.h"
+#include "common/BitSetIterator.h"
#include "common/Log.h"
#include "common/SystemUtils.h"
#include "dawn_native/Instance.h"
@@ -109,14 +110,16 @@ namespace dawn_native { namespace vulkan {
MaybeError Backend::Initialize(bool useSwiftshader) {
DAWN_TRY(LoadVulkan(useSwiftshader));
- // TODO(crbug.com/dawn/406): In order to not modify the environment variables of
- // the rest of an application embedding Dawn, we should set these only
- // in the scope of this function. See ANGLE's ScopedVkLoaderEnvironment
+ // These environment variables need only be set while loading procs and gathering device
+ // info.
+ ScopedEnvironmentVar vkICDFilenames;
+ ScopedEnvironmentVar vkLayerPath;
+
if (useSwiftshader) {
#if defined(DAWN_SWIFTSHADER_VK_ICD_JSON)
std::string fullSwiftshaderICDPath =
GetExecutableDirectory() + DAWN_SWIFTSHADER_VK_ICD_JSON;
- if (!SetEnvironmentVar("VK_ICD_FILENAMES", fullSwiftshaderICDPath.c_str())) {
+ if (!vkICDFilenames.Set("VK_ICD_FILENAMES", fullSwiftshaderICDPath.c_str())) {
return DAWN_INTERNAL_ERROR("Couldn't set VK_ICD_FILENAMES");
}
#else
@@ -128,7 +131,7 @@ namespace dawn_native { namespace vulkan {
if (GetInstance()->IsBackendValidationEnabled()) {
#if defined(DAWN_ENABLE_VULKAN_VALIDATION_LAYERS)
std::string vkDataDir = GetExecutableDirectory() + DAWN_VK_DATA_DIR;
- if (!SetEnvironmentVar("VK_LAYER_PATH", vkDataDir.c_str())) {
+ if (!vkLayerPath.Set("VK_LAYER_PATH", vkDataDir.c_str())) {
return DAWN_INTERNAL_ERROR("Couldn't set VK_LAYER_PATH");
}
#else
@@ -147,7 +150,7 @@ namespace dawn_native { namespace vulkan {
DAWN_TRY(mFunctions.LoadInstanceProcs(mInstance, mGlobalInfo));
- if (usedGlobalKnobs.debugReport) {
+ if (usedGlobalKnobs.HasExt(InstanceExt::DebugReport)) {
DAWN_TRY(RegisterDebugReport());
}
@@ -174,9 +177,7 @@ namespace dawn_native { namespace vulkan {
ResultOrError<VulkanGlobalKnobs> Backend::CreateInstance() {
VulkanGlobalKnobs usedKnobs = {};
-
- std::vector<const char*> layersToRequest;
- std::vector<const char*> extensionsToRequest;
+ std::vector<const char*> layerNames;
        // vktrace works by inserting a layer, but we hide it behind a macro because the
        // vktrace layer crashes when used without the vktrace server started. See this vktrace issue:
@@ -185,7 +186,7 @@ namespace dawn_native { namespace vulkan {
// by other layers.
#if defined(DAWN_USE_VKTRACE)
if (mGlobalInfo.vktrace) {
- layersToRequest.push_back(kLayerNameLunargVKTrace);
+ layerNames.push_back(kLayerNameLunargVKTrace);
usedKnobs.vktrace = true;
}
#endif
@@ -193,76 +194,33 @@ namespace dawn_native { namespace vulkan {
// it unless we are debugging in RenderDoc so we hide it behind a macro.
#if defined(DAWN_USE_RENDERDOC)
if (mGlobalInfo.renderDocCapture) {
- layersToRequest.push_back(kLayerNameRenderDocCapture);
+ layerNames.push_back(kLayerNameRenderDocCapture);
usedKnobs.renderDocCapture = true;
}
#endif
if (GetInstance()->IsBackendValidationEnabled()) {
if (mGlobalInfo.validation) {
- layersToRequest.push_back(kLayerNameKhronosValidation);
+ layerNames.push_back(kLayerNameKhronosValidation);
usedKnobs.validation = true;
}
- if (mGlobalInfo.debugReport) {
- extensionsToRequest.push_back(kExtensionNameExtDebugReport);
- usedKnobs.debugReport = true;
- }
}
- // Always request all extensions used to create VkSurfaceKHR objects so that they are
- // always available for embedders looking to create VkSurfaceKHR on our VkInstance.
- if (mGlobalInfo.fuchsiaImagePipeSwapchain) {
- layersToRequest.push_back(kLayerNameFuchsiaImagePipeSwapchain);
- usedKnobs.fuchsiaImagePipeSwapchain = true;
- }
- if (mGlobalInfo.metalSurface) {
- extensionsToRequest.push_back(kExtensionNameExtMetalSurface);
- usedKnobs.metalSurface = true;
- }
- if (mGlobalInfo.surface) {
- extensionsToRequest.push_back(kExtensionNameKhrSurface);
- usedKnobs.surface = true;
- }
- if (mGlobalInfo.waylandSurface) {
- extensionsToRequest.push_back(kExtensionNameKhrWaylandSurface);
- usedKnobs.waylandSurface = true;
- }
- if (mGlobalInfo.win32Surface) {
- extensionsToRequest.push_back(kExtensionNameKhrWin32Surface);
- usedKnobs.win32Surface = true;
- }
- if (mGlobalInfo.xcbSurface) {
- extensionsToRequest.push_back(kExtensionNameKhrXcbSurface);
- usedKnobs.xcbSurface = true;
- }
- if (mGlobalInfo.xlibSurface) {
- extensionsToRequest.push_back(kExtensionNameKhrXlibSurface);
- usedKnobs.xlibSurface = true;
- }
- if (mGlobalInfo.fuchsiaImagePipeSurface) {
- extensionsToRequest.push_back(kExtensionNameFuchsiaImagePipeSurface);
- usedKnobs.fuchsiaImagePipeSurface = true;
+        // Available and known instance extensions are requested by default; a few special
+        // cases are then removed below.
+ InstanceExtSet extensionsToRequest = mGlobalInfo.extensions;
+
+ if (!GetInstance()->IsBackendValidationEnabled()) {
+ extensionsToRequest.Set(InstanceExt::DebugReport, false);
}
+ usedKnobs.extensions = extensionsToRequest;
- // Mark the promoted extensions as present if the core version in which they were promoted
- // is used. This allows having a single boolean that checks if the functionality from that
- // extension is available (instead of checking extension || coreVersion).
- if (mGlobalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- usedKnobs.getPhysicalDeviceProperties2 = true;
- usedKnobs.externalMemoryCapabilities = true;
- usedKnobs.externalSemaphoreCapabilities = true;
- } else {
- if (mGlobalInfo.externalMemoryCapabilities) {
- extensionsToRequest.push_back(kExtensionNameKhrExternalMemoryCapabilities);
- usedKnobs.externalMemoryCapabilities = true;
- }
- if (mGlobalInfo.externalSemaphoreCapabilities) {
- extensionsToRequest.push_back(kExtensionNameKhrExternalSemaphoreCapabilities);
- usedKnobs.externalSemaphoreCapabilities = true;
- }
- if (mGlobalInfo.getPhysicalDeviceProperties2) {
- extensionsToRequest.push_back(kExtensionNameKhrGetPhysicalDeviceProperties2);
- usedKnobs.getPhysicalDeviceProperties2 = true;
+ std::vector<const char*> extensionNames;
+ for (uint32_t ext : IterateBitSet(extensionsToRequest.extensionBitSet)) {
+ const InstanceExtInfo& info = GetInstanceExtInfo(static_cast<InstanceExt>(ext));
+
+ if (info.versionPromoted > mGlobalInfo.apiVersion) {
+ extensionNames.push_back(info.name);
}
}
@@ -280,10 +238,10 @@ namespace dawn_native { namespace vulkan {
createInfo.pNext = nullptr;
createInfo.flags = 0;
createInfo.pApplicationInfo = &appInfo;
- createInfo.enabledLayerCount = static_cast<uint32_t>(layersToRequest.size());
- createInfo.ppEnabledLayerNames = layersToRequest.data();
- createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionsToRequest.size());
- createInfo.ppEnabledExtensionNames = extensionsToRequest.data();
+ createInfo.enabledLayerCount = static_cast<uint32_t>(layerNames.size());
+ createInfo.ppEnabledLayerNames = layerNames.data();
+ createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+ createInfo.ppEnabledExtensionNames = extensionNames.data();
DAWN_TRY(CheckVkSuccess(mFunctions.CreateInstance(&createInfo, nullptr, &mInstance),
"vkCreateInstance"));
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
index ba41c385456..1b325bee5b8 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
@@ -93,7 +93,7 @@ namespace dawn_native { namespace vulkan {
const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
VkDescriptorSetLayoutBinding* vkBinding = &bindings[numBindings];
- vkBinding->binding = bindingNumber;
+ vkBinding->binding = static_cast<uint32_t>(bindingNumber);
vkBinding->descriptorType =
VulkanDescriptorType(bindingInfo.type, bindingInfo.hasDynamicOffset);
vkBinding->descriptorCount = 1;
@@ -118,7 +118,7 @@ namespace dawn_native { namespace vulkan {
// Compute the size of descriptor pools used for this layout.
std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
- for (BindingIndex bindingIndex = 0; bindingIndex < GetBindingCount(); ++bindingIndex) {
+ for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
VkDescriptorType vulkanType =
VulkanDescriptorType(bindingInfo.type, bindingInfo.hasDynamicOffset);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
index a936f20b607..eb31182c0a4 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
@@ -52,7 +52,7 @@ namespace dawn_native { namespace vulkan {
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write.pNext = nullptr;
write.dstSet = GetHandle();
- write.dstBinding = bindingNumber;
+ write.dstBinding = static_cast<uint32_t>(bindingNumber);
write.dstArrayElement = 0;
write.descriptorCount = 1;
write.descriptorType =
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
index 4bc09e065c6..0b9385e9393 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
@@ -148,7 +148,7 @@ namespace dawn_native { namespace vulkan {
createInfo.pQueueFamilyIndices = 0;
Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkSuccess(
+ DAWN_TRY(CheckVkOOMThenSuccess(
device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
"vkCreateBuffer"));
@@ -165,6 +165,10 @@ namespace dawn_native { namespace vulkan {
mMemoryAllocation.GetOffset()),
"vkBindBufferMemory"));
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ ClearBuffer(device->GetPendingRecordingContext(), ClearValue::NonZero);
+ }
+
return {};
}
@@ -186,6 +190,25 @@ namespace dawn_native { namespace vulkan {
void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
wgpu::BufferUsage usage) {
+ std::vector<VkBufferMemoryBarrier> barriers;
+ VkPipelineStageFlags srcStages = 0;
+ VkPipelineStageFlags dstStages = 0;
+
+ TransitionUsageNow(recordingContext, usage, &barriers, &srcStages, &dstStages);
+
+ if (barriers.size() > 0) {
+ ASSERT(barriers.size() == 1);
+ ToBackend(GetDevice())
+ ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+ nullptr, barriers.size(), barriers.data(), 0, nullptr);
+ }
+ }
+
+ void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
+ wgpu::BufferUsage usage,
+ std::vector<VkBufferMemoryBarrier>* bufferBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages) {
bool lastIncludesTarget = (mLastUsage & usage) == usage;
bool lastReadOnly = (mLastUsage & kReadOnlyBufferUsages) == mLastUsage;
@@ -200,8 +223,8 @@ namespace dawn_native { namespace vulkan {
return;
}
- VkPipelineStageFlags srcStages = VulkanPipelineStage(mLastUsage);
- VkPipelineStageFlags dstStages = VulkanPipelineStage(usage);
+ *srcStages |= VulkanPipelineStage(mLastUsage);
+ *dstStages |= VulkanPipelineStage(usage);
VkBufferMemoryBarrier barrier;
barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
@@ -214,9 +237,7 @@ namespace dawn_native { namespace vulkan {
barrier.offset = 0;
barrier.size = GetSize();
- ToBackend(GetDevice())
- ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
- nullptr, 1, &barrier, 0, nullptr);
+ bufferBarriers->push_back(barrier);
mLastUsage = usage;
}
@@ -236,12 +257,6 @@ namespace dawn_native { namespace vulkan {
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
-
- uint8_t* memory = mMemoryAllocation.GetMappedPointer();
- ASSERT(memory != nullptr);
-
- MapRequestTracker* tracker = device->GetMapRequestTracker();
- tracker->Track(this, serial, memory, false);
return {};
}
@@ -250,12 +265,6 @@ namespace dawn_native { namespace vulkan {
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
-
- uint8_t* memory = mMemoryAllocation.GetMappedPointer();
- ASSERT(memory != nullptr);
-
- MapRequestTracker* tracker = device->GetMapRequestTracker();
- tracker->Track(this, serial, memory, true);
return {};
}
@@ -263,6 +272,12 @@ namespace dawn_native { namespace vulkan {
// No need to do anything, we keep CPU-visible memory mapped at all time.
}
+ void* Buffer::GetMappedPointerImpl() {
+ uint8_t* memory = mMemoryAllocation.GetMappedPointer();
+ ASSERT(memory != nullptr);
+ return memory;
+ }
+
void Buffer::DestroyImpl() {
ToBackend(GetDevice())->DeallocateMemory(&mMemoryAllocation);
@@ -272,34 +287,20 @@ namespace dawn_native { namespace vulkan {
}
}
- // MapRequestTracker
-
- MapRequestTracker::MapRequestTracker(Device* device) : mDevice(device) {
- }
+ void Buffer::ClearBuffer(CommandRecordingContext* recordingContext, ClearValue clearValue) {
+ ASSERT(recordingContext != nullptr);
- MapRequestTracker::~MapRequestTracker() {
- ASSERT(mInflightRequests.Empty());
- }
+ // TODO(jiawei.shao@intel.com): support buffer lazy-initialization to 0.
+ ASSERT(clearValue == BufferBase::ClearValue::NonZero);
- void MapRequestTracker::Track(Buffer* buffer, uint32_t mapSerial, void* data, bool isWrite) {
- Request request;
- request.buffer = buffer;
- request.mapSerial = mapSerial;
- request.data = data;
- request.isWrite = isWrite;
+ constexpr uint32_t kClearBufferValue = 0x01010101;
- mInflightRequests.Enqueue(std::move(request), mDevice->GetPendingCommandSerial());
- }
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
- void MapRequestTracker::Tick(Serial finishedSerial) {
- for (auto& request : mInflightRequests.IterateUpTo(finishedSerial)) {
- if (request.isWrite) {
- request.buffer->OnMapWriteCommandSerialFinished(request.mapSerial, request.data);
- } else {
- request.buffer->OnMapReadCommandSerialFinished(request.mapSerial, request.data);
- }
- }
- mInflightRequests.ClearUpTo(finishedSerial);
+ Device* device = ToBackend(GetDevice());
+ // TODO(jiawei.shao@intel.com): find out why VK_WHOLE_SIZE doesn't work on old Windows Intel
+ // Vulkan drivers.
+ device->fn.CmdFillBuffer(recordingContext->commandBuffer, mHandle, 0, GetSize(),
+ kClearBufferValue);
}
-
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
index 3d7fdf9134f..1c04870376a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
@@ -39,11 +39,17 @@ namespace dawn_native { namespace vulkan {
// `commands`.
// TODO(cwallez@chromium.org): coalesce barriers and do them early when possible.
void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
+ void TransitionUsageNow(CommandRecordingContext* recordingContext,
+ wgpu::BufferUsage usage,
+ std::vector<VkBufferMemoryBarrier>* bufferBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages);
private:
~Buffer() override;
using BufferBase::BufferBase;
MaybeError Initialize();
+ void ClearBuffer(CommandRecordingContext* recordingContext, ClearValue clearValue);
// Dawn API
MaybeError MapReadAsyncImpl(uint32_t serial) override;
@@ -53,6 +59,7 @@ namespace dawn_native { namespace vulkan {
bool IsMapWritable() const override;
MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ void* GetMappedPointerImpl() override;
VkBuffer mHandle = VK_NULL_HANDLE;
ResourceMemoryAllocation mMemoryAllocation;
@@ -60,26 +67,6 @@ namespace dawn_native { namespace vulkan {
wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
};
- class MapRequestTracker {
- public:
- MapRequestTracker(Device* device);
- ~MapRequestTracker();
-
- void Track(Buffer* buffer, uint32_t mapSerial, void* data, bool isWrite);
- void Tick(Serial finishedSerial);
-
- private:
- Device* mDevice;
-
- struct Request {
- Ref<Buffer> buffer;
- uint32_t mapSerial;
- void* data;
- bool isWrite;
- };
- SerialQueue<Request> mInflightRequests;
- };
-
}} // namespace dawn_native::vulkan
#endif // DAWNNATIVE_VULKAN_BUFFERVK_H_
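
The two-phase TransitionUsageNow above lets callers accumulate VkBufferMemoryBarriers and stage masks across many resources, then flush them with a single vkCmdPipelineBarrier instead of one call per buffer. The flush step, as a standalone sketch:

    #include <vector>
    #include <vulkan/vulkan.h>

    // Issue one pipeline barrier covering every accumulated buffer barrier.
    void FlushBufferBarriers(VkCommandBuffer commands,
                             const std::vector<VkBufferMemoryBarrier>& barriers,
                             VkPipelineStageFlags srcStages,
                             VkPipelineStageFlags dstStages) {
        if (barriers.empty()) {
            return;  // nothing transitioned, no barrier needed
        }
        vkCmdPipelineBarrier(commands, srcStages, dstStages, 0,
                             0, nullptr,  // no global memory barriers
                             static_cast<uint32_t>(barriers.size()), barriers.data(),
                             0, nullptr);  // no image barriers
    }
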
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
index 558e5760aac..575f66f8704 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
@@ -16,6 +16,7 @@
#include "dawn_native/BindGroupAndStorageBarrierTracker.h"
#include "dawn_native/CommandEncoder.h"
+#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/vulkan/BindGroupVk.h"
@@ -64,10 +65,13 @@ namespace dawn_native { namespace vulkan {
VkImageCopy region;
+ // TODO(jiawei.shao@intel.com): support 1D and 3D textures
+ ASSERT(srcTexture->GetDimension() == wgpu::TextureDimension::e2D &&
+ dstTexture->GetDimension() == wgpu::TextureDimension::e2D);
region.srcSubresource.aspectMask = srcTexture->GetVkAspectMask();
region.srcSubresource.mipLevel = srcCopy.mipLevel;
region.srcSubresource.baseArrayLayer = srcCopy.arrayLayer;
- region.srcSubresource.layerCount = 1;
+ region.srcSubresource.layerCount = copySize.depth;
region.srcOffset.x = srcCopy.origin.x;
region.srcOffset.y = srcCopy.origin.y;
@@ -76,7 +80,7 @@ namespace dawn_native { namespace vulkan {
region.dstSubresource.aspectMask = dstTexture->GetVkAspectMask();
region.dstSubresource.mipLevel = dstCopy.mipLevel;
region.dstSubresource.baseArrayLayer = dstCopy.arrayLayer;
- region.dstSubresource.layerCount = 1;
+ region.dstSubresource.layerCount = copySize.depth;
region.dstOffset.x = dstCopy.origin.x;
region.dstOffset.y = dstCopy.origin.y;
@@ -86,28 +90,30 @@ namespace dawn_native { namespace vulkan {
Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
region.extent.width = imageExtent.width;
region.extent.height = imageExtent.height;
- region.extent.depth = imageExtent.depth;
+ region.extent.depth = 1;
return region;
}
- void ApplyDescriptorSets(Device* device,
- VkCommandBuffer commands,
- VkPipelineBindPoint bindPoint,
- VkPipelineLayout pipelineLayout,
- const std::bitset<kMaxBindGroups>& bindGroupsToApply,
- const std::array<BindGroupBase*, kMaxBindGroups>& bindGroups,
- const std::array<uint32_t, kMaxBindGroups>& dynamicOffsetCounts,
- const std::array<std::array<uint32_t, kMaxBindingsPerGroup>,
- kMaxBindGroups>& dynamicOffsets) {
- for (uint32_t dirtyIndex : IterateBitSet(bindGroupsToApply)) {
+ void ApplyDescriptorSets(
+ Device* device,
+ VkCommandBuffer commands,
+ VkPipelineBindPoint bindPoint,
+ VkPipelineLayout pipelineLayout,
+ const BindGroupLayoutMask& bindGroupsToApply,
+ const ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups>& bindGroups,
+ const ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups>& dynamicOffsetCounts,
+ const ityp::array<BindGroupIndex,
+ std::array<uint32_t, kMaxBindingsPerGroup>,
+ kMaxBindGroups>& dynamicOffsets) {
+ for (BindGroupIndex dirtyIndex : IterateBitSet(bindGroupsToApply)) {
VkDescriptorSet set = ToBackend(bindGroups[dirtyIndex])->GetHandle();
const uint32_t* dynamicOffset = dynamicOffsetCounts[dirtyIndex] > 0
? dynamicOffsets[dirtyIndex].data()
: nullptr;
- device->fn.CmdBindDescriptorSets(commands, bindPoint, pipelineLayout, dirtyIndex, 1,
- &*set, dynamicOffsetCounts[dirtyIndex],
- dynamicOffset);
+ device->fn.CmdBindDescriptorSets(commands, bindPoint, pipelineLayout,
+ static_cast<uint32_t>(dirtyIndex), 1, &*set,
+ dynamicOffsetCounts[dirtyIndex], dynamicOffset);
}
}
@@ -139,8 +145,9 @@ namespace dawn_native { namespace vulkan {
mDirtyBindGroupsObjectChangedOrIsDynamic, mBindGroups,
mDynamicOffsetCounts, mDynamicOffsets);
- for (uint32_t index : IterateBitSet(mBindGroupLayoutsMask)) {
- for (uint32_t bindingIndex : IterateBitSet(mBindingsNeedingBarrier[index])) {
+ for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
+ for (BindingIndex bindingIndex :
+ IterateBitSet(mBindingsNeedingBarrier[index])) {
switch (mBindingTypes[index][bindingIndex]) {
case wgpu::BindingType::StorageBuffer:
static_cast<Buffer*>(mBindings[index][bindingIndex])
@@ -149,14 +156,15 @@ namespace dawn_native { namespace vulkan {
break;
case wgpu::BindingType::ReadonlyStorageTexture:
- case wgpu::BindingType::WriteonlyStorageTexture:
- ToBackend(
- static_cast<TextureViewBase*>(mBindings[index][bindingIndex])
- ->GetTexture())
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ TextureViewBase* view =
+ static_cast<TextureViewBase*>(mBindings[index][bindingIndex]);
+ ToBackend(view->GetTexture())
->TransitionUsageNow(recordingContext,
- wgpu::TextureUsage::Storage);
+ wgpu::TextureUsage::Storage,
+ view->GetSubresourceRange());
break;
-
+ }
case wgpu::BindingType::StorageTexture:
// Not implemented.
@@ -370,25 +378,40 @@ namespace dawn_native { namespace vulkan {
VkCommandBuffer commands = recordingContext->commandBuffer;
// Records the necessary barriers for the resource usage pre-computed by the frontend
- auto TransitionForPass = [](CommandRecordingContext* recordingContext,
+ auto TransitionForPass = [](Device* device, CommandRecordingContext* recordingContext,
const PassResourceUsage& usages) {
+ std::vector<VkBufferMemoryBarrier> bufferBarriers;
+ std::vector<VkImageMemoryBarrier> imageBarriers;
+ VkPipelineStageFlags srcStages = 0;
+ VkPipelineStageFlags dstStages = 0;
+
for (size_t i = 0; i < usages.buffers.size(); ++i) {
Buffer* buffer = ToBackend(usages.buffers[i]);
- buffer->TransitionUsageNow(recordingContext, usages.bufferUsages[i]);
+ buffer->TransitionUsageNow(recordingContext, usages.bufferUsages[i],
+ &bufferBarriers, &srcStages, &dstStages);
}
+
for (size_t i = 0; i < usages.textures.size(); ++i) {
Texture* texture = ToBackend(usages.textures[i]);
// Clear textures that are not output attachments. Output attachments will be
// cleared in RecordBeginRenderPass by setting the loadop to clear when the
// texture subresource has not been initialized before the render pass.
if (!(usages.textureUsages[i].usage & wgpu::TextureUsage::OutputAttachment)) {
- texture->EnsureSubresourceContentInitialized(recordingContext, 0,
- texture->GetNumMipLevels(), 0,
- texture->GetArrayLayers());
+ texture->EnsureSubresourceContentInitialized(recordingContext,
+ texture->GetAllSubresources());
}
- texture->TransitionUsageNow(recordingContext, usages.textureUsages[i].usage);
+ texture->TransitionUsageForPass(recordingContext, usages.textureUsages[i],
+ &imageBarriers, &srcStages, &dstStages);
+ }
+
+ if (bufferBarriers.size() || imageBarriers.size()) {
+ device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages,
+ 0, 0, nullptr, bufferBarriers.size(),
+ bufferBarriers.data(), imageBarriers.size(),
+ imageBarriers.data());
}
};
+
const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
size_t nextPassNumber = 0;
@@ -397,6 +420,7 @@ namespace dawn_native { namespace vulkan {
switch (type) {
case Command::CopyBufferToBuffer: {
CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+
Buffer* srcBuffer = ToBackend(copy->source.Get());
Buffer* dstBuffer = ToBackend(copy->destination.Get());
@@ -423,21 +447,21 @@ namespace dawn_native { namespace vulkan {
ComputeBufferImageCopyRegion(src, dst, copy->copySize);
VkImageSubresourceLayers subresource = region.imageSubresource;
+ ASSERT(dst.texture->GetDimension() == wgpu::TextureDimension::e2D);
+ SubresourceRange range = {subresource.mipLevel, 1, subresource.baseArrayLayer,
+ subresource.layerCount};
if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
subresource.mipLevel)) {
// Since texture has been overwritten, it has been "initialized"
- dst.texture->SetIsSubresourceContentInitialized(
- true, subresource.mipLevel, 1, subresource.baseArrayLayer, 1);
+ dst.texture->SetIsSubresourceContentInitialized(true, range);
} else {
ToBackend(dst.texture)
- ->EnsureSubresourceContentInitialized(recordingContext,
- subresource.mipLevel, 1,
- subresource.baseArrayLayer, 1);
+ ->EnsureSubresourceContentInitialized(recordingContext, range);
}
ToBackend(src.buffer)
->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
ToBackend(dst.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst);
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
VkImage dstImage = ToBackend(dst.texture)->GetHandle();
@@ -458,13 +482,15 @@ namespace dawn_native { namespace vulkan {
ComputeBufferImageCopyRegion(dst, src, copy->copySize);
VkImageSubresourceLayers subresource = region.imageSubresource;
+ ASSERT(src.texture->GetDimension() == wgpu::TextureDimension::e2D);
+ const SubresourceRange range = {subresource.mipLevel, 1,
+ subresource.baseArrayLayer,
+ subresource.layerCount};
ToBackend(src.texture)
- ->EnsureSubresourceContentInitialized(recordingContext,
- subresource.mipLevel, 1,
- subresource.baseArrayLayer, 1);
+ ->EnsureSubresourceContentInitialized(recordingContext, range);
ToBackend(src.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc);
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
ToBackend(dst.buffer)
->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
@@ -481,25 +507,37 @@ namespace dawn_native { namespace vulkan {
mCommands.NextCommand<CopyTextureToTextureCmd>();
TextureCopy& src = copy->source;
TextureCopy& dst = copy->destination;
+ SubresourceRange srcRange = {src.mipLevel, 1, src.arrayLayer,
+ copy->copySize.depth};
+ SubresourceRange dstRange = {dst.mipLevel, 1, dst.arrayLayer,
+ copy->copySize.depth};
ToBackend(src.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, src.mipLevel, 1,
- src.arrayLayer, 1);
+ ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
dst.mipLevel)) {
// Since destination texture has been overwritten, it has been "initialized"
- dst.texture->SetIsSubresourceContentInitialized(true, dst.mipLevel, 1,
- dst.arrayLayer, 1);
+ dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
} else {
ToBackend(dst.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, dst.mipLevel, 1,
- dst.arrayLayer, 1);
+ ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
+ }
+
+ if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
+ // When there are overlapped subresources, the layout of the overlapped
+ // subresources should all be GENERAL instead of what we set now. Currently
+ // it is not allowed to copy with overlapped subresources, but we still
+ // add the ASSERT here as a reminder for this possible misuse.
+ ASSERT(!IsRangeOverlapped(src.arrayLayer, dst.arrayLayer,
+ copy->copySize.depth));
}
ToBackend(src.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc);
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
+ srcRange);
ToBackend(dst.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst);
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
+ dstRange);
                        // In some situations we cannot do texture-to-texture copies with vkCmdCopyImage
                        // because the Vulkan spec always validates image copies with the virtual size of
@@ -539,7 +577,7 @@ namespace dawn_native { namespace vulkan {
case Command::BeginRenderPass: {
BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- TransitionForPass(recordingContext, passResourceUsages[nextPassNumber]);
+ TransitionForPass(device, recordingContext, passResourceUsages[nextPassNumber]);
LazyClearRenderPassAttachments(cmd);
DAWN_TRY(RecordRenderPass(recordingContext, cmd));
@@ -551,7 +589,7 @@ namespace dawn_native { namespace vulkan {
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
- TransitionForPass(recordingContext, passResourceUsages[nextPassNumber]);
+ TransitionForPass(device, recordingContext, passResourceUsages[nextPassNumber]);
RecordComputePass(recordingContext);
nextPassNumber++;
@@ -626,7 +664,7 @@ namespace dawn_native { namespace vulkan {
}
case Command::InsertDebugMarker: {
- if (device->GetDeviceInfo().debugMarker) {
+ if (device->GetDeviceInfo().HasExt(DeviceExt::DebugMarker)) {
InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
const char* label = mCommands.NextData<char>(cmd->length + 1);
VkDebugMarkerMarkerInfoEXT markerInfo;
@@ -646,7 +684,7 @@ namespace dawn_native { namespace vulkan {
}
case Command::PopDebugGroup: {
- if (device->GetDeviceInfo().debugMarker) {
+ if (device->GetDeviceInfo().HasExt(DeviceExt::DebugMarker)) {
mCommands.NextCommand<PopDebugGroupCmd>();
device->fn.CmdDebugMarkerEndEXT(commands);
} else {
@@ -656,7 +694,7 @@ namespace dawn_native { namespace vulkan {
}
case Command::PushDebugGroup: {
- if (device->GetDeviceInfo().debugMarker) {
+ if (device->GetDeviceInfo().HasExt(DeviceExt::DebugMarker)) {
PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
const char* label = mCommands.NextData<char>(cmd->length + 1);
VkDebugMarkerMarkerInfoEXT markerInfo;
@@ -773,7 +811,7 @@ namespace dawn_native { namespace vulkan {
}
case Command::InsertDebugMarker: {
- if (device->GetDeviceInfo().debugMarker) {
+ if (device->GetDeviceInfo().HasExt(DeviceExt::DebugMarker)) {
InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
const char* label = iter->NextData<char>(cmd->length + 1);
VkDebugMarkerMarkerInfoEXT markerInfo;
@@ -793,7 +831,7 @@ namespace dawn_native { namespace vulkan {
}
case Command::PopDebugGroup: {
- if (device->GetDeviceInfo().debugMarker) {
+ if (device->GetDeviceInfo().HasExt(DeviceExt::DebugMarker)) {
iter->NextCommand<PopDebugGroupCmd>();
device->fn.CmdDebugMarkerEndEXT(commands);
} else {
@@ -803,7 +841,7 @@ namespace dawn_native { namespace vulkan {
}
case Command::PushDebugGroup: {
- if (device->GetDeviceInfo().debugMarker) {
+ if (device->GetDeviceInfo().HasExt(DeviceExt::DebugMarker)) {
PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
const char* label = iter->NextData<char>(cmd->length + 1);
VkDebugMarkerMarkerInfoEXT markerInfo;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
index b4a9dda9a6d..f681547655b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
@@ -18,6 +18,7 @@
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/PipelineLayoutVk.h"
#include "dawn_native/vulkan/ShaderModuleVk.h"
+#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -49,6 +50,19 @@ namespace dawn_native { namespace vulkan {
createInfo.stage.pSpecializationInfo = nullptr;
Device* device = ToBackend(GetDevice());
+
+ PNextChainBuilder extChain(&createInfo);
+
+ VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeInfo = {};
+ uint32_t computeSubgroupSize = device->GetComputeSubgroupSize();
+ if (computeSubgroupSize != 0u) {
+ ASSERT(device->GetDeviceInfo().HasExt(DeviceExt::SubgroupSizeControl));
+ subgroupSizeInfo.requiredSubgroupSize = computeSubgroupSize;
+ extChain.Add(
+ &subgroupSizeInfo,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
+ }
+
return CheckVkSuccess(
device->fn.CreateComputePipelines(device->GetVkDevice(), ::VK_NULL_HANDLE, 1,
&createInfo, nullptr, &*mHandle),
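A minimal sketch, assuming the Vulkan headers and the PNextChainBuilder helper added later in this change (UtilsVulkan.h), of the chain the hunk above builds; the value 16 stands in for whatever GetComputeSubgroupSize() returned:

    VkComputePipelineCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;

    PNextChainBuilder extChain(&createInfo);

    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeInfo = {};
    subgroupSizeInfo.requiredSubgroupSize = 16;  // hypothetical non-zero subgroup size
    extChain.Add(&subgroupSizeInfo,
                 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
    // After Add(), createInfo.pNext points at subgroupSizeInfo and subgroupSizeInfo.pNext
    // is nullptr.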
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
index a2ba72a1839..921dd7341c3 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
@@ -38,6 +38,7 @@
#include "dawn_native/vulkan/StagingBufferVk.h"
#include "dawn_native/vulkan/SwapChainVk.h"
#include "dawn_native/vulkan/TextureVk.h"
+#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -82,7 +83,6 @@ namespace dawn_native { namespace vulkan {
mDeleter = std::make_unique<FencedDeleter>(this);
}
- mMapRequestTracker = std::make_unique<MapRequestTracker>(this);
mRenderPassCache = std::make_unique<RenderPassCache>(this);
mResourceMemoryAllocator = std::make_unique<ResourceMemoryAllocator>(this);
@@ -125,6 +125,9 @@ namespace dawn_native { namespace vulkan {
const PipelineLayoutDescriptor* descriptor) {
return PipelineLayout::Create(this, descriptor);
}
+ ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation");
+ }
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
return RenderPipeline::Create(this, descriptor);
@@ -156,7 +159,6 @@ namespace dawn_native { namespace vulkan {
}
MaybeError Device::TickImpl() {
- CheckPassedSerials();
RecycleCompletedCommands();
Serial completedSerial = GetCompletedCommandSerial();
@@ -167,16 +169,11 @@ namespace dawn_native { namespace vulkan {
}
mBindGroupLayoutsPendingDeallocation.ClearUpTo(completedSerial);
- mMapRequestTracker->Tick(completedSerial);
mResourceMemoryAllocator->Tick(completedSerial);
mDeleter->Tick(completedSerial);
if (mRecordingContext.used) {
DAWN_TRY(SubmitPendingCommands());
- } else if (completedSerial == GetLastSubmittedCommandSerial()) {
- // If there's no GPU work in flight we still need to artificially increment the serial
- // so that CPU operations waiting on GPU completion can know they don't have to wait.
- ArtificiallyIncrementSerials();
}
return {};
@@ -201,10 +198,6 @@ namespace dawn_native { namespace vulkan {
return mQueue;
}
- MapRequestTracker* Device::GetMapRequestTracker() const {
- return mMapRequestTracker.get();
- }
-
FencedDeleter* Device::GetFencedDeleter() const {
return mDeleter.get();
}
@@ -276,56 +269,29 @@ namespace dawn_native { namespace vulkan {
ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalDevice) {
VulkanDeviceKnobs usedKnobs = {};
- float zero = 0.0f;
- std::vector<const char*> layersToRequest;
- std::vector<const char*> extensionsToRequest;
- std::vector<VkDeviceQueueCreateInfo> queuesToRequest;
+ // Default to asking for all available known extensions.
+ usedKnobs.extensions = mDeviceInfo.extensions;
- if (mDeviceInfo.debugMarker) {
- extensionsToRequest.push_back(kExtensionNameExtDebugMarker);
- usedKnobs.debugMarker = true;
- }
- if (mDeviceInfo.externalMemory) {
- extensionsToRequest.push_back(kExtensionNameKhrExternalMemory);
- usedKnobs.externalMemory = true;
- }
- if (mDeviceInfo.externalMemoryFD) {
- extensionsToRequest.push_back(kExtensionNameKhrExternalMemoryFD);
- usedKnobs.externalMemoryFD = true;
- }
- if (mDeviceInfo.externalMemoryDmaBuf) {
- extensionsToRequest.push_back(kExtensionNameExtExternalMemoryDmaBuf);
- usedKnobs.externalMemoryDmaBuf = true;
- }
- if (mDeviceInfo.imageDrmFormatModifier) {
- extensionsToRequest.push_back(kExtensionNameExtImageDrmFormatModifier);
- usedKnobs.imageDrmFormatModifier = true;
- }
- if (mDeviceInfo.externalMemoryZirconHandle) {
- extensionsToRequest.push_back(kExtensionNameFuchsiaExternalMemory);
- usedKnobs.externalMemoryZirconHandle = true;
- }
- if (mDeviceInfo.externalSemaphore) {
- extensionsToRequest.push_back(kExtensionNameKhrExternalSemaphore);
- usedKnobs.externalSemaphore = true;
- }
- if (mDeviceInfo.externalSemaphoreFD) {
- extensionsToRequest.push_back(kExtensionNameKhrExternalSemaphoreFD);
- usedKnobs.externalSemaphoreFD = true;
- }
- if (mDeviceInfo.externalSemaphoreZirconHandle) {
- extensionsToRequest.push_back(kExtensionNameFuchsiaExternalSemaphore);
- usedKnobs.externalSemaphoreZirconHandle = true;
- }
- if (mDeviceInfo.swapchain) {
- extensionsToRequest.push_back(kExtensionNameKhrSwapchain);
- usedKnobs.swapchain = true;
- }
- if (mDeviceInfo.maintenance1) {
- extensionsToRequest.push_back(kExtensionNameKhrMaintenance1);
- usedKnobs.maintenance1 = true;
+ // However, only request the extensions that haven't been promoted to core in the device's apiVersion.
+ std::vector<const char*> extensionNames;
+ for (uint32_t ext : IterateBitSet(usedKnobs.extensions.extensionBitSet)) {
+ const DeviceExtInfo& info = GetDeviceExtInfo(static_cast<DeviceExt>(ext));
+
+ if (info.versionPromoted > mDeviceInfo.properties.apiVersion) {
+ extensionNames.push_back(info.name);
+ }
}
+ // Some device features can only be enabled using a VkPhysicalDeviceFeatures2 struct,
+ // which is supported via the VK_KHR_get_physical_device_properties2 instance extension,
+ // promoted to the core API in Vulkan 1.1.
+ //
+ // Prepare a VkPhysicalDeviceFeatures2 struct for this use case; it will only be
+ // populated if HasExt(DeviceExt::GetPhysicalDeviceProperties2) is true.
+ VkPhysicalDeviceFeatures2 features2 = {};
+ features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+ PNextChainBuilder featuresChain(&features2);
+
// Always require independentBlend because it is a core Dawn feature
usedKnobs.features.independentBlend = VK_TRUE;
// Always require imageCubeArray because it is a core Dawn feature
@@ -333,12 +299,38 @@ namespace dawn_native { namespace vulkan {
// Always require fragmentStoresAndAtomics because it is required by end2end tests.
usedKnobs.features.fragmentStoresAndAtomics = VK_TRUE;
+ if (mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+ ASSERT(usedKnobs.HasExt(DeviceExt::SubgroupSizeControl));
+
+ // Always request all the features from VK_EXT_subgroup_size_control when available.
+ usedKnobs.subgroupSizeControlFeatures = mDeviceInfo.subgroupSizeControlFeatures;
+ featuresChain.Add(&usedKnobs.subgroupSizeControlFeatures);
+
+ mComputeSubgroupSize = FindComputeSubgroupSize();
+ }
+
if (IsExtensionEnabled(Extension::TextureCompressionBC)) {
ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionBC ==
VK_TRUE);
usedKnobs.features.textureCompressionBC = VK_TRUE;
}
+ if (IsExtensionEnabled(Extension::ShaderFloat16)) {
+ const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+ ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
+ deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
+ deviceInfo.HasExt(DeviceExt::_16BitStorage) &&
+ deviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE);
+
+ usedKnobs.shaderFloat16Int8Features.shaderFloat16 = VK_TRUE;
+ usedKnobs._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess = VK_TRUE;
+
+ featuresChain.Add(&usedKnobs.shaderFloat16Int8Features,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+ featuresChain.Add(&usedKnobs._16BitStorageFeatures,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+ }
+
// Find a universal queue family
{
// Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
@@ -359,6 +351,8 @@ namespace dawn_native { namespace vulkan {
}
// Choose to create a single universal queue
+ std::vector<VkDeviceQueueCreateInfo> queuesToRequest;
+ float zero = 0.0f;
{
VkDeviceQueueCreateInfo queueCreateInfo;
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
@@ -377,11 +371,21 @@ namespace dawn_native { namespace vulkan {
createInfo.flags = 0;
createInfo.queueCreateInfoCount = static_cast<uint32_t>(queuesToRequest.size());
createInfo.pQueueCreateInfos = queuesToRequest.data();
- createInfo.enabledLayerCount = static_cast<uint32_t>(layersToRequest.size());
- createInfo.ppEnabledLayerNames = layersToRequest.data();
- createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionsToRequest.size());
- createInfo.ppEnabledExtensionNames = extensionsToRequest.data();
- createInfo.pEnabledFeatures = &usedKnobs.features;
+ createInfo.enabledLayerCount = 0;
+ createInfo.ppEnabledLayerNames = nullptr;
+ createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+ createInfo.ppEnabledExtensionNames = extensionNames.data();
+
+ // When we have DeviceExt::GetPhysicalDeviceProperties2, use features2 so that features not
+ // covered by VkPhysicalDeviceFeatures can be enabled.
+ if (mDeviceInfo.HasExt(DeviceExt::GetPhysicalDeviceProperties2)) {
+ features2.features = usedKnobs.features;
+ createInfo.pNext = &features2;
+ createInfo.pEnabledFeatures = nullptr;
+ } else {
+ ASSERT(features2.pNext == nullptr);
+ createInfo.pEnabledFeatures = &usedKnobs.features;
+ }
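+
+ // Shape of the resulting createInfo (sketch): with DeviceExt::GetPhysicalDeviceProperties2,
+ // createInfo.pNext points at features2, which carries usedKnobs.features plus any feature
+ // structs chained above, and pEnabledFeatures stays null; without the extension, nothing
+ // is chained and pEnabledFeatures points directly at usedKnobs.features.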
DAWN_TRY(CheckVkSuccess(fn.CreateDevice(physicalDevice, &createInfo, nullptr, &mVkDevice),
"vkCreateDevice"));
@@ -389,6 +393,32 @@ namespace dawn_native { namespace vulkan {
return usedKnobs;
}
+ uint32_t Device::FindComputeSubgroupSize() const {
+ if (!mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+ return 0;
+ }
+
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& ext =
+ mDeviceInfo.subgroupSizeControlProperties;
+
+ if (ext.minSubgroupSize == ext.maxSubgroupSize) {
+ return 0;
+ }
+
+ // At the moment, only Intel devices support varying subgroup sizes, and 16, which is
+ // the next value after the minimum of 8, is the sweet spot according to [1]. Hence
+ // the following heuristic, which may need to be adjusted in the future for other
+ // architectures, or if a specific API is added to let client code select the size.
+ //
+ // [1] https://bugs.freedesktop.org/show_bug.cgi?id=108875
+ uint32_t subgroupSize = ext.minSubgroupSize * 2;
+ if (subgroupSize <= ext.maxSubgroupSize) {
+ return subgroupSize;
+ } else {
+ return ext.minSubgroupSize;
+ }
+ }
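+
+ // Worked example of the heuristic above (hypothetical limits): with minSubgroupSize == 8
+ // and maxSubgroupSize == 32, subgroupSize == 16 and is returned. When minSubgroupSize ==
+ // maxSubgroupSize, the function returns 0 early and no required subgroup size is set on
+ // compute pipelines.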
+
void Device::GatherQueueFromDevice() {
fn.GetDeviceQueue(mVkDevice, mQueueFamily, 0, &mQueue);
}
@@ -552,12 +582,9 @@ namespace dawn_native { namespace vulkan {
BufferBase* destination,
uint64_t destinationOffset,
uint64_t size) {
- // It is a validation error to do a 0-sized copy in Vulkan skip it since it is a noop.
- if (size == 0) {
- return {};
- }
-
- CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+ // It is a validation error to do a 0-sized copy in Vulkan, so callers must skip
+ // 0-sized copies before calling this function.
+ ASSERT(size != 0);
// Insert memory barrier to ensure host write operations are made visible before
// copying from the staging buffer. However, this barrier can be removed (see note below).
@@ -568,6 +595,7 @@ namespace dawn_native { namespace vulkan {
// Insert pipeline barrier to ensure correct ordering with previous memory operations on the
// buffer.
+ CommandRecordingContext* recordingContext = GetPendingRecordingContext();
ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
VkBufferCopy copy;
@@ -592,6 +620,12 @@ namespace dawn_native { namespace vulkan {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+ // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
+ // GPUTextureDescriptor.size.depth is done.
+ TextureDescriptor fixedDescriptor;
+ DAWN_TRY_ASSIGN(fixedDescriptor, FixTextureDescriptor(this, textureDescriptor));
+ textureDescriptor = &fixedDescriptor;
+
// Check services support this combination of handle type / image info
if (!mExternalSemaphoreService->Supported()) {
return DAWN_VALIDATION_ERROR("External semaphore usage not supported");
@@ -711,7 +745,21 @@ namespace dawn_native { namespace vulkan {
return mResourceMemoryAllocator.get();
}
+ uint32_t Device::GetComputeSubgroupSize() const {
+ return mComputeSubgroupSize;
+ }
+
MaybeError Device::WaitForIdleForDestruction() {
+ // Immediately tag the recording context as unused so we don't try to submit it in
+ // Tick. Move its commands to mUnusedCommands so they can be cleaned up in
+ // ShutDownImpl.
+ if (mRecordingContext.used) {
+ CommandPoolAndBuffer commands = {mRecordingContext.commandPool,
+ mRecordingContext.commandBuffer};
+ mUnusedCommands.push_back(commands);
+ mRecordingContext = CommandRecordingContext();
+ }
+
VkResult waitIdleResult = VkResult::WrapUnsafe(fn.QueueWaitIdle(mQueue));
// Ignore the result of QueueWaitIdle: it can return OOM which we can't really do anything
// about, Device lost, which means workloads running on the GPU are no longer accessible
@@ -737,9 +785,6 @@ namespace dawn_native { namespace vulkan {
mFencesInFlight.pop();
}
-
- // Force all operations to look as if they were completed
- AssumeCommandsComplete();
return {};
}
@@ -779,14 +824,6 @@ namespace dawn_native { namespace vulkan {
}
mRecordingContext.signalSemaphores.clear();
- // Some operations might have been started since the last submit and waiting
- // on a serial that doesn't have a corresponding fence enqueued. Force all
- // operations to look as if they were completed (because they were).
- AssumeCommandsComplete();
-
- // Assert that errors are device loss so that we can continue with destruction
- AssertAndIgnoreDeviceLossError(TickImpl());
-
ASSERT(mCommandsInFlight.Empty());
for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
@@ -802,8 +839,6 @@ namespace dawn_native { namespace vulkan {
// Call Tick() again to clear them before releasing the deleter.
mDeleter->Tick(GetCompletedCommandSerial());
- mMapRequestTracker = nullptr;
-
// The VkRenderPasses in the cache can be destroyed immediately since all commands referring
// to them are guaranteed to be finished executing.
mRenderPassCache = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
index 585d7f076a5..9e9ded9727c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
@@ -37,7 +37,6 @@ namespace dawn_native { namespace vulkan {
class BindGroupLayout;
class BufferUploader;
class FencedDeleter;
- class MapRequestTracker;
class RenderPassCache;
class ResourceMemoryAllocator;
@@ -59,7 +58,6 @@ namespace dawn_native { namespace vulkan {
BufferUploader* GetBufferUploader() const;
FencedDeleter* GetFencedDeleter() const;
- MapRequestTracker* GetMapRequestTracker() const;
RenderPassCache* GetRenderPassCache() const;
CommandRecordingContext* GetPendingRecordingContext();
@@ -98,6 +96,10 @@ namespace dawn_native { namespace vulkan {
ResourceMemoryAllocator* GetResourceMemoryAllocatorForTesting() const;
+ // Return the fixed subgroup size to use for compute shaders on this device or 0 if none
+ // needs to be set.
+ uint32_t GetComputeSubgroupSize() const;
+
private:
Device(Adapter* adapter, const DeviceDescriptor* descriptor);
@@ -110,6 +112,8 @@ namespace dawn_native { namespace vulkan {
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
@@ -130,6 +134,7 @@ namespace dawn_native { namespace vulkan {
ResultOrError<VulkanDeviceKnobs> CreateDevice(VkPhysicalDevice physicalDevice);
void GatherQueueFromDevice();
+ uint32_t FindComputeSubgroupSize() const;
void InitTogglesFromDriver();
void ApplyDepth24PlusS8Toggle();
@@ -144,10 +149,10 @@ namespace dawn_native { namespace vulkan {
VkDevice mVkDevice = VK_NULL_HANDLE;
uint32_t mQueueFamily = 0;
VkQueue mQueue = VK_NULL_HANDLE;
+ uint32_t mComputeSubgroupSize = 0;
SerialQueue<Ref<BindGroupLayout>> mBindGroupLayoutsPendingDeallocation;
std::unique_ptr<FencedDeleter> mDeleter;
- std::unique_ptr<MapRequestTracker> mMapRequestTracker;
std::unique_ptr<ResourceMemoryAllocator> mResourceMemoryAllocator;
std::unique_ptr<RenderPassCache> mRenderPassCache;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h b/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
index 9b5a7a1dc73..e11a74fe4aa 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
@@ -27,6 +27,7 @@ namespace dawn_native { namespace vulkan {
class ComputePipeline;
class Device;
class PipelineLayout;
+ class QuerySet;
class Queue;
class RenderPipeline;
class ResourceHeap;
@@ -46,6 +47,7 @@ namespace dawn_native { namespace vulkan {
using ComputePipelineType = ComputePipeline;
using DeviceType = Device;
using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
using QueueType = Queue;
using RenderPipelineType = RenderPipeline;
using ResourceHeapType = ResourceHeap;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
index 87b47ab77f1..80c5aa833e8 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
@@ -37,7 +37,7 @@ namespace dawn_native { namespace vulkan {
// this constraint at the Dawn level?
uint32_t numSetLayouts = 0;
std::array<VkDescriptorSetLayout, kMaxBindGroups> setLayouts;
- for (uint32_t setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
+ for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
setLayouts[numSetLayouts] = ToBackend(GetBindGroupLayout(setIndex))->GetHandle();
numSetLayouts++;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
index cff74349850..22523a36874 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "common/Math.h"
#include "dawn_native/BuddyMemoryAllocator.h"
#include "dawn_native/ResourceHeapAllocator.h"
#include "dawn_native/vulkan/DeviceVk.h"
@@ -28,8 +29,7 @@ namespace dawn_native { namespace vulkan {
// TODO(cwallez@chromium.org): This is a hardcoded heuristic to choose when to
// suballocate but it should ideally depend on the size of the memory heaps and other
// factors.
- constexpr uint64_t kMaxBuddySystemSize = 32ull * 1024ull * 1024ull * 1024ull; // 32GB
- constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull; // 4MB
+ constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull; // 4MiB
// Have each bucket of the buddy system allocate at least some resource of the maximum
// size
@@ -42,10 +42,18 @@ namespace dawn_native { namespace vulkan {
class ResourceMemoryAllocator::SingleTypeAllocator : public ResourceHeapAllocator {
public:
- SingleTypeAllocator(Device* device, size_t memoryTypeIndex)
+ SingleTypeAllocator(Device* device, size_t memoryTypeIndex, VkDeviceSize memoryHeapSize)
: mDevice(device),
mMemoryTypeIndex(memoryTypeIndex),
- mBuddySystem(kMaxBuddySystemSize, kBuddyHeapsSize, this) {
+ mMemoryHeapSize(memoryHeapSize),
+ mBuddySystem(
+ // Round down to a power of 2 that's <= mMemoryHeapSize. This will always
+ // be a multiple of kBuddyHeapsSize because kBuddyHeapsSize is a power of 2.
+ uint64_t(1) << Log2(mMemoryHeapSize),
+ // Take the min in the very unlikely case the memory heap is tiny.
+ std::min(uint64_t(1) << Log2(mMemoryHeapSize), kBuddyHeapsSize),
+ this) {
+ ASSERT(IsPowerOfTwo(kBuddyHeapsSize));
}
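+
+ // Sizing example (hypothetical heap): for a 3 GiB heap, Log2(3 GiB) == 31, so the buddy
+ // system spans uint64_t(1) << 31 == 2 GiB. The std::min() only matters when the heap
+ // itself is smaller than kBuddyHeapsSize.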
~SingleTypeAllocator() override = default;
@@ -62,6 +70,10 @@ namespace dawn_native { namespace vulkan {
ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
uint64_t size) override {
+ if (size > mMemoryHeapSize) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Allocation size too large");
+ }
+
VkMemoryAllocateInfo allocateInfo;
allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocateInfo.pNext = nullptr;
@@ -87,6 +99,7 @@ namespace dawn_native { namespace vulkan {
private:
Device* mDevice;
size_t mMemoryTypeIndex;
+ VkDeviceSize mMemoryHeapSize;
BuddyMemoryAllocator mBuddySystem;
};
@@ -97,7 +110,8 @@ namespace dawn_native { namespace vulkan {
mAllocatorsPerType.reserve(info.memoryTypes.size());
for (size_t i = 0; i < info.memoryTypes.size(); i++) {
- mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(mDevice, i));
+ mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(
+ mDevice, i, info.memoryHeaps[info.memoryTypes[i].heapIndex].size));
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
index 36f9db815b2..d76e789b441 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
@@ -37,6 +37,7 @@ namespace dawn_native { namespace vulkan {
}
MaybeError ShaderModule::Initialize() {
+ DAWN_TRY(InitializeBase());
const std::vector<uint32_t>& spirv = GetSpirv();
// Use SPIRV-Cross to extract info from the SPIRV even if Vulkan consumes SPIRV. We want to
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
index 0c6aad3e605..660128776d6 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
@@ -53,13 +53,14 @@ namespace dawn_native { namespace vulkan {
.Detach();
}
- MaybeError SwapChain::OnBeforePresent(TextureBase* texture) {
+ MaybeError SwapChain::OnBeforePresent(TextureViewBase* view) {
Device* device = ToBackend(GetDevice());
// Perform the necessary pipeline barriers for the texture to be used with the usage
// requested by the implementation.
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- ToBackend(texture)->TransitionUsageNow(recordingContext, mTextureUsage);
+ ToBackend(view->GetTexture())
+ ->TransitionUsageNow(recordingContext, mTextureUsage, view->GetSubresourceRange());
DAWN_TRY(device->SubmitPendingCommands());
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
index 7765fc0c509..9ee1792f60c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
@@ -32,7 +32,7 @@ namespace dawn_native { namespace vulkan {
~SwapChain() override;
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureBase* texture) override;
+ MaybeError OnBeforePresent(TextureViewBase* view) override;
private:
wgpu::TextureUsage mTextureUsage;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
index bd036dd8757..eb502f8c1f3 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
@@ -32,18 +32,6 @@
namespace dawn_native { namespace vulkan {
namespace {
- // Converts an Dawn texture dimension to a Vulkan image type.
- // Note that in Vulkan dimensionality is only 1D, 2D, 3D. Arrays and cube maps are expressed
- // via the array size and a "cubemap compatible" flag.
- VkImageType VulkanImageType(wgpu::TextureDimension dimension) {
- switch (dimension) {
- case wgpu::TextureDimension::e2D:
- return VK_IMAGE_TYPE_2D;
- default:
- UNREACHABLE();
- }
- }
-
// Converts a Dawn texture dimension to a Vulkan image view type.
// Contrary to image types, image view types include arrayness and cubemapness
VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
@@ -221,8 +209,49 @@ namespace dawn_native { namespace vulkan {
}
}
- VkExtent3D VulkanExtent3D(const Extent3D& extent) {
- return {extent.width, extent.height, extent.depth};
+ VkImageMemoryBarrier BuildMemoryBarrier(const Format& format,
+ const VkImage& image,
+ wgpu::TextureUsage lastUsage,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ VkImageMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = VulkanAccessFlags(lastUsage, format);
+ barrier.dstAccessMask = VulkanAccessFlags(usage, format);
+ barrier.oldLayout = VulkanImageLayout(lastUsage, format);
+ barrier.newLayout = VulkanImageLayout(usage, format);
+ barrier.image = image;
+ barrier.subresourceRange.aspectMask = VulkanAspectMask(format);
+ barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
+ barrier.subresourceRange.levelCount = range.levelCount;
+ barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
+ barrier.subresourceRange.layerCount = range.layerCount;
+
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ return barrier;
+ }
+
+ void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
+ const Extent3D& size = texture.GetSize();
+
+ info->mipLevels = texture.GetNumMipLevels();
+ info->samples = VulkanSampleCount(texture.GetSampleCount());
+
+ // Fill in the image type, and paper over differences in how the array layer count is
+ // specified between WebGPU and Vulkan.
+ switch (texture.GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ info->imageType = VK_IMAGE_TYPE_2D;
+ info->extent = {size.width, size.height, 1};
+ info->arrayLayers = size.depth;
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
}
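+
+ // Example mapping (illustrative values): a WebGPU 2D texture of size {256, 256, 6} with
+ // 4 mip levels becomes imageType VK_IMAGE_TYPE_2D, extent {256, 256, 1}, arrayLayers 6,
+ // mipLevels 4.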
} // namespace
@@ -403,7 +432,7 @@ namespace dawn_native { namespace vulkan {
return DAWN_VALIDATION_ERROR("Mip level count must be 1");
}
- if (descriptor->arrayLayerCount != 1) {
+ if (descriptor->size.depth != 1) {
return DAWN_VALIDATION_ERROR("Array layer count must be 1");
}
@@ -468,15 +497,12 @@ namespace dawn_native { namespace vulkan {
// combination of sample, usage etc. because validation should have been done in the Dawn
// frontend already based on the minimum supported formats in the Vulkan spec
VkImageCreateInfo createInfo = {};
+ FillVulkanCreateInfoSizesAndType(*this, &createInfo);
+
createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
- createInfo.imageType = VulkanImageType(GetDimension());
createInfo.format = VulkanImageFormat(device, GetFormat().format);
- createInfo.extent = VulkanExtent3D(GetSize());
- createInfo.mipLevels = GetNumMipLevels();
- createInfo.arrayLayers = GetArrayLayers();
- createInfo.samples = VulkanSampleCount(GetSampleCount());
createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
createInfo.usage = VulkanImageUsage(GetUsage(), GetFormat());
createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
@@ -486,7 +512,7 @@ namespace dawn_native { namespace vulkan {
ASSERT(IsSampleCountSupported(device, createInfo));
- if (GetArrayLayers() >= 6 && GetSize().width == GetSize().height) {
+ if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
}
@@ -512,9 +538,8 @@ namespace dawn_native { namespace vulkan {
"BindImageMemory"));
if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(), 0,
- GetNumMipLevels(), 0, GetArrayLayers(),
- TextureBase::ClearValue::NonZero));
+ DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
+ GetAllSubresources(), TextureBase::ClearValue::NonZero));
}
return {};
@@ -530,15 +555,13 @@ namespace dawn_native { namespace vulkan {
}
mExternalState = ExternalState::PendingAcquire;
+
VkImageCreateInfo baseCreateInfo = {};
+ FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);
+
baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
baseCreateInfo.pNext = nullptr;
- baseCreateInfo.imageType = VulkanImageType(GetDimension());
baseCreateInfo.format = format;
- baseCreateInfo.extent = VulkanExtent3D(GetSize());
- baseCreateInfo.mipLevels = GetNumMipLevels();
- baseCreateInfo.arrayLayers = GetArrayLayers();
- baseCreateInfo.samples = VulkanSampleCount(GetSampleCount());
baseCreateInfo.usage = usage;
baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
baseCreateInfo.queueFamilyIndexCount = 0;
@@ -568,7 +591,7 @@ namespace dawn_native { namespace vulkan {
// Don't clear imported texture if already cleared
if (descriptor->isCleared) {
- SetIsSubresourceContentInitialized(true, 0, 1, 0, 1);
+ SetIsSubresourceContentInitialized(true, {0, 1, 0, 1});
}
// Success, acquire all the external objects.
@@ -594,7 +617,7 @@ namespace dawn_native { namespace vulkan {
// Release the texture
mExternalState = ExternalState::PendingRelease;
- TransitionUsageNow(device->GetPendingRecordingContext(), wgpu::TextureUsage::None);
+ TransitionFullUsage(device->GetPendingRecordingContext(), wgpu::TextureUsage::None);
// Queue submit to signal we are done with the texture
device->GetPendingRecordingContext()->signalSemaphores.push_back(mSignalSemaphore);
@@ -644,111 +667,237 @@ namespace dawn_native { namespace vulkan {
return VulkanAspectMask(GetFormat());
}
- void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
- wgpu::TextureUsage usage) {
- // Avoid encoding barriers when it isn't needed.
- bool lastReadOnly = (mLastUsage & kReadOnlyTextureUsages) == mLastUsage;
- if (lastReadOnly && mLastUsage == usage && mLastExternalState == mExternalState) {
- return;
- }
+ void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+ std::vector<VkImageMemoryBarrier>* barriers,
+ size_t transitionBarrierStart) {
+ ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
- const Format& format = GetFormat();
-
- VkPipelineStageFlags srcStages = VulkanPipelineStage(mLastUsage, format);
- VkPipelineStageFlags dstStages = VulkanPipelineStage(usage, format);
-
- VkImageMemoryBarrier barrier;
- barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
- barrier.pNext = nullptr;
- barrier.srcAccessMask = VulkanAccessFlags(mLastUsage, format);
- barrier.dstAccessMask = VulkanAccessFlags(usage, format);
- barrier.oldLayout = VulkanImageLayout(mLastUsage, format);
- barrier.newLayout = VulkanImageLayout(usage, format);
- barrier.image = mHandle;
- // This transitions the whole resource but assumes it is a 2D texture
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
- barrier.subresourceRange.aspectMask = VulkanAspectMask(format);
- barrier.subresourceRange.baseMipLevel = 0;
- barrier.subresourceRange.levelCount = GetNumMipLevels();
- barrier.subresourceRange.baseArrayLayer = 0;
- barrier.subresourceRange.layerCount = GetArrayLayers();
-
- barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ // transitionBarrierStart specifies the index where the barriers for the current
+ // transition start in the vector. barriers->size() - transitionBarrierStart is the
+ // number of barriers that have already been added to the vector during the current
+ // transition.
if (mExternalState == ExternalState::PendingAcquire) {
+ if (barriers->size() == transitionBarrierStart) {
+ barriers->push_back(BuildMemoryBarrier(
+ GetFormat(), mHandle, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
+ SubresourceRange::SingleSubresource(0, 0)));
+ }
+
// Transfer texture from external queue to graphics queue
- barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
- barrier.dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();
+ (*barriers)[transitionBarrierStart].srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+ (*barriers)[transitionBarrierStart].dstQueueFamilyIndex =
+ ToBackend(GetDevice())->GetGraphicsQueueFamily();
// Don't override oldLayout to leave it as VK_IMAGE_LAYOUT_UNDEFINED
// TODO(http://crbug.com/dawn/200)
mExternalState = ExternalState::Acquired;
-
} else if (mExternalState == ExternalState::PendingRelease) {
+ if (barriers->size() == transitionBarrierStart) {
+ barriers->push_back(BuildMemoryBarrier(
+ GetFormat(), mHandle, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
+ SubresourceRange::SingleSubresource(0, 0)));
+ }
+
// Transfer texture from graphics queue to external queue
- barrier.srcQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();
- barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
- barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
+ (*barriers)[transitionBarrierStart].srcQueueFamilyIndex =
+ ToBackend(GetDevice())->GetGraphicsQueueFamily();
+ (*barriers)[transitionBarrierStart].dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+ (*barriers)[transitionBarrierStart].newLayout = VK_IMAGE_LAYOUT_GENERAL;
mExternalState = ExternalState::Released;
}
- // Move required semaphores into waitSemaphores
+ mLastExternalState = mExternalState;
+
recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
mWaitRequirements.begin(), mWaitRequirements.end());
mWaitRequirements.clear();
+ }
+
+ bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
+ // Reuse the texture directly and avoid encoding barriers when it isn't needed.
+ bool lastReadOnly = (lastUsage & kReadOnlyTextureUsages) == lastUsage;
+ if (lastReadOnly && lastUsage == usage && mLastExternalState == mExternalState) {
+ return true;
+ }
+ return false;
+ }
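+
+ // Examples of the rule above (illustrative): Sampled -> Sampled needs no barrier
+ // (read-only and unchanged); CopyDst -> Sampled does, because CopyDst is a write;
+ // Sampled -> CopySrc also does, since the usage changed even though both are reads.
+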
+ void Texture::TransitionFullUsage(CommandRecordingContext* recordingContext,
+ wgpu::TextureUsage usage) {
+ TransitionUsageNow(recordingContext, usage, GetAllSubresources());
+ }
+
+ void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
+ const PassTextureUsage& textureUsages,
+ std::vector<VkImageMemoryBarrier>* imageBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages) {
+ size_t transitionBarrierStart = imageBarriers->size();
+ const Format& format = GetFormat();
+
+ wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
+ wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+
+ uint32_t subresourceCount = GetSubresourceCount();
+ ASSERT(textureUsages.subresourceUsages.size() == subresourceCount);
+ // These transitions assume it is a 2D texture.
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+
+ // If the new usages of all subresources are the same and the old usages of all
+ // subresources are the same too, we can use one barrier to do the state transition
+ // for all subresources. Note that if the texture has only one mip level and one
+ // array slice, it will fall into this category.
+ if (textureUsages.sameUsagesAcrossSubresources && mSameLastUsagesAcrossSubresources) {
+ if (CanReuseWithoutBarrier(mSubresourceLastUsages[0], textureUsages.usage)) {
+ return;
+ }
+
+ imageBarriers->push_back(BuildMemoryBarrier(format, mHandle, mSubresourceLastUsages[0],
+ textureUsages.usage, GetAllSubresources()));
+ allLastUsages = mSubresourceLastUsages[0];
+ allUsages = textureUsages.usage;
+ for (uint32_t i = 0; i < subresourceCount; ++i) {
+ mSubresourceLastUsages[i] = textureUsages.usage;
+ }
+ } else {
+ for (uint32_t arrayLayer = 0; arrayLayer < GetArrayLayers(); ++arrayLayer) {
+ for (uint32_t mipLevel = 0; mipLevel < GetNumMipLevels(); ++mipLevel) {
+ uint32_t index = GetSubresourceIndex(mipLevel, arrayLayer);
+
+ // Avoid encoding barriers when it isn't needed.
+ if (textureUsages.subresourceUsages[index] == wgpu::TextureUsage::None) {
+ continue;
+ }
+
+ if (CanReuseWithoutBarrier(mSubresourceLastUsages[index],
+ textureUsages.subresourceUsages[index])) {
+ continue;
+ }
+ imageBarriers->push_back(BuildMemoryBarrier(
+ format, mHandle, mSubresourceLastUsages[index],
+ textureUsages.subresourceUsages[index],
+ SubresourceRange::SingleSubresource(mipLevel, arrayLayer)));
+ allLastUsages |= mSubresourceLastUsages[index];
+ allUsages |= textureUsages.subresourceUsages[index];
+ mSubresourceLastUsages[index] = textureUsages.subresourceUsages[index];
+ }
+ }
+ }
+
+ if (mExternalState != ExternalState::InternalOnly) {
+ TweakTransitionForExternalUsage(recordingContext, imageBarriers,
+ transitionBarrierStart);
+ }
+
+ *srcStages |= VulkanPipelineStage(allLastUsages, format);
+ *dstStages |= VulkanPipelineStage(allUsages, format);
+ mSameLastUsagesAcrossSubresources = textureUsages.sameUsagesAcrossSubresources;
+ }
+
+ void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ std::vector<VkImageMemoryBarrier> barriers;
+ const Format& format = GetFormat();
+
+ wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+ uint32_t subresourceCount = GetSubresourceCount();
+
+ // These transitions assume it is a 2D texture.
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+
+ // If the usage transition covers all subresources, and the old usages of all
+ // subresources are the same, then we can use one barrier to do the state transition
+ // for all subresources. Note that if the texture has only one mip level and one
+ // array slice, it will fall into this category.
+ bool areAllSubresourcesCovered = range.levelCount * range.layerCount == subresourceCount;
+ if (mSameLastUsagesAcrossSubresources && areAllSubresourcesCovered) {
+ ASSERT(range.baseMipLevel == 0 && range.baseArrayLayer == 0);
+ if (CanReuseWithoutBarrier(mSubresourceLastUsages[0], usage)) {
+ return;
+ }
+ barriers.push_back(
+ BuildMemoryBarrier(format, mHandle, mSubresourceLastUsages[0], usage, range));
+ allLastUsages = mSubresourceLastUsages[0];
+ for (uint32_t i = 0; i < subresourceCount; ++i) {
+ mSubresourceLastUsages[i] = usage;
+ }
+ } else {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
+ uint32_t index = GetSubresourceIndex(level, layer);
+
+ if (CanReuseWithoutBarrier(mSubresourceLastUsages[index], usage)) {
+ continue;
+ }
+
+ barriers.push_back(
+ BuildMemoryBarrier(format, mHandle, mSubresourceLastUsages[index], usage,
+ SubresourceRange::SingleSubresource(level, layer)));
+ allLastUsages |= mSubresourceLastUsages[index];
+ mSubresourceLastUsages[index] = usage;
+ }
+ }
+ }
+
+ if (mExternalState != ExternalState::InternalOnly) {
+ TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
+ }
+
+ VkPipelineStageFlags srcStages = VulkanPipelineStage(allLastUsages, format);
+ VkPipelineStageFlags dstStages = VulkanPipelineStage(usage, format);
ToBackend(GetDevice())
->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
- nullptr, 0, nullptr, 1, &barrier);
+ nullptr, 0, nullptr, barriers.size(), barriers.data());
- mLastUsage = usage;
- mLastExternalState = mExternalState;
+ mSameLastUsagesAcrossSubresources = areAllSubresourcesCovered;
}
MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
+ const SubresourceRange& range,
TextureBase::ClearValue clearValue) {
Device* device = ToBackend(GetDevice());
uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
- TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst);
+ TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
if (GetFormat().isRenderable) {
- VkImageSubresourceRange range = {};
- range.aspectMask = GetVkAspectMask();
- range.levelCount = 1;
- range.layerCount = 1;
-
- for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
- range.baseMipLevel = level;
- for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
- ++layer) {
+ VkImageSubresourceRange imageRange = {};
+ imageRange.aspectMask = GetVkAspectMask();
+ imageRange.levelCount = 1;
+ imageRange.layerCount = 1;
+
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ imageRange.baseMipLevel = level;
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, layer))) {
// Skip lazy clears if already initialized.
continue;
}
- range.baseArrayLayer = layer;
+ imageRange.baseArrayLayer = layer;
if (GetFormat().HasDepthOrStencil()) {
VkClearDepthStencilValue clearDepthStencilValue[1];
clearDepthStencilValue[0].depth = fClearColor;
clearDepthStencilValue[0].stencil = clearColor;
- device->fn.CmdClearDepthStencilImage(recordingContext->commandBuffer,
- GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- clearDepthStencilValue, 1, &range);
+ device->fn.CmdClearDepthStencilImage(
+ recordingContext->commandBuffer, GetHandle(),
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clearDepthStencilValue, 1,
+ &imageRange);
} else {
VkClearColorValue clearColorValue = {
{fClearColor, fClearColor, fClearColor, fClearColor}};
device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- &clearColorValue, 1, &range);
+ &clearColorValue, 1, &imageRange);
}
}
}
@@ -756,9 +905,9 @@ namespace dawn_native { namespace vulkan {
// TODO(natlee@microsoft.com): test compressed textures are cleared
// create temp buffer with clear color to copy to the texture image
uint32_t bytesPerRow =
- Align((GetSize().width / GetFormat().blockWidth) * GetFormat().blockByteSize,
+ Align((GetWidth() / GetFormat().blockWidth) * GetFormat().blockByteSize,
kTextureBytesPerRowAlignment);
- uint64_t bufferSize64 = bytesPerRow * (GetSize().height / GetFormat().blockHeight);
+ uint64_t bufferSize64 = bytesPerRow * (GetHeight() / GetFormat().blockHeight);
if (bufferSize64 > std::numeric_limits<uint32_t>::max()) {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
}
@@ -775,13 +924,15 @@ namespace dawn_native { namespace vulkan {
bufferCopy.offset = uploadHandle.startOffset;
bufferCopy.bytesPerRow = bytesPerRow;
- for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
Extent3D copySize = GetMipLevelVirtualSize(level);
- for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
- ++layer) {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleSubresource(level, layer))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -804,23 +955,18 @@ namespace dawn_native { namespace vulkan {
}
}
if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
- layerCount);
+ SetIsSubresourceContentInitialized(true, range);
device->IncrementLazyClearCountForTesting();
}
return {};
}
void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount) {
+ const SubresourceRange& range) {
if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
return;
}
- if (!IsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
- layerCount)) {
+ if (!IsSubresourceContentInitialized(range)) {
// TODO(jiawei.shao@intel.com): initialize textures in BC formats with Buffer-to-Texture
// copies.
if (GetFormat().isCompressed) {
@@ -829,9 +975,8 @@ namespace dawn_native { namespace vulkan {
// If subresource has not been initialized, clear it to black as it could contain dirty
// bits from recycled memory
- GetDevice()->ConsumedError(ClearTexture(recordingContext, baseMipLevel, levelCount,
- baseArrayLayer, layerCount,
- TextureBase::ClearValue::Zero));
+ GetDevice()->ConsumedError(
+ ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
}
}
@@ -844,6 +989,14 @@ namespace dawn_native { namespace vulkan {
}
MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+ if ((GetTexture()->GetUsage() &
+ ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
+ // If the texture view has no usage other than CopySrc and CopyDst, then it can't
+ // actually be used as a render pass attachment or sampled/storage texture. The
+ // Vulkan validation layers warn if you create such a VkImageView, so return early.
+ return {};
+ }
+
Device* device = ToBackend(GetTexture()->GetDevice());
VkImageViewCreateInfo createInfo;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
index 42c6216a9d0..8a1564af72b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
@@ -18,6 +18,7 @@
#include "dawn_native/Texture.h"
#include "common/vulkan_platform.h"
+#include "dawn_native/PassResourceUsage.h"
#include "dawn_native/ResourceMemoryAllocation.h"
#include "dawn_native/vulkan/ExternalHandle.h"
#include "dawn_native/vulkan/external_memory/MemoryService.h"
@@ -63,13 +64,20 @@ namespace dawn_native { namespace vulkan {
// Transitions the texture to be used as `usage`, recording any necessary barrier in
// `commands`.
// TODO(cwallez@chromium.org): coalesce barriers and do them early when possible.
+ void TransitionFullUsage(CommandRecordingContext* recordingContext,
+ wgpu::TextureUsage usage);
+
void TransitionUsageNow(CommandRecordingContext* recordingContext,
- wgpu::TextureUsage usage);
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range);
+ void TransitionUsageForPass(CommandRecordingContext* recordingContext,
+ const PassTextureUsage& textureUsages,
+ std::vector<VkImageMemoryBarrier>* imageBarriers,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages);
+
void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount);
+ const SubresourceRange& range);
MaybeError SignalAndDestroy(VkSemaphore* outSignalSemaphore);
// Binds externally allocated memory to the VkImage and on success, takes ownership of
@@ -90,12 +98,14 @@ namespace dawn_native { namespace vulkan {
void DestroyImpl() override;
MaybeError ClearTexture(CommandRecordingContext* recordingContext,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
+ const SubresourceRange& range,
TextureBase::ClearValue);
+ void TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+ std::vector<VkImageMemoryBarrier>* barriers,
+ size_t transitionBarrierStart);
+ bool CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage);
+
VkImage mHandle = VK_NULL_HANDLE;
ResourceMemoryAllocation mMemoryAllocation;
VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
@@ -113,9 +123,12 @@ namespace dawn_native { namespace vulkan {
VkSemaphore mSignalSemaphore = VK_NULL_HANDLE;
std::vector<VkSemaphore> mWaitRequirements;
+ bool mSameLastUsagesAcrossSubresources = true;
+
// A usage of none will make sure the texture is transitioned before its first use as
// required by the Vulkan spec.
- wgpu::TextureUsage mLastUsage = wgpu::TextureUsage::None;
+ std::vector<wgpu::TextureUsage> mSubresourceLastUsages =
+ std::vector<wgpu::TextureUsage>(GetSubresourceCount(), wgpu::TextureUsage::None);
};
class TextureView final : public TextureViewBase {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
index 91b45ca1f05..4ce513f5840 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
@@ -82,7 +82,6 @@ namespace dawn_native { namespace vulkan {
region.imageSubresource.aspectMask = texture->GetVkAspectMask();
region.imageSubresource.mipLevel = textureCopy.mipLevel;
region.imageSubresource.baseArrayLayer = textureCopy.arrayLayer;
- region.imageSubresource.layerCount = 1;
region.imageOffset.x = textureCopy.origin.x;
region.imageOffset.y = textureCopy.origin.y;
@@ -91,7 +90,10 @@ namespace dawn_native { namespace vulkan {
Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
region.imageExtent.width = imageExtent.width;
region.imageExtent.height = imageExtent.height;
- region.imageExtent.depth = copySize.depth;
+
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ region.imageSubresource.layerCount = copySize.depth;
+ region.imageExtent.depth = 1;
return region;
}
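For instance (illustrative values): with copySize == {64, 64, 3} targeting a 2D array texture, the code above emits a single VkBufferImageCopy where

    // region.imageSubresource.layerCount == 3   (three consecutive array layers)
    // region.imageExtent == {64, 64, 1}         (the copy depth is folded into layerCount)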
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
index 02ef6d3737b..36ebd34fe67 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
@@ -21,6 +21,71 @@
namespace dawn_native { namespace vulkan {
+ // A helper type used to build a pNext chain of extension structs.
+ // Usage is:
+ // 1) Create instance, passing the address of the first struct in the
+ // chain. This will parse the existing |pNext| chain in it to find
+ // its tail.
+ //
+ // 2) Call Add(&vk_struct) every time a new struct needs to be appended
+ // to the chain.
+ //
+ // 3) Alternatively, call Add(&vk_struct, VK_STRUCTURE_TYPE_XXX) to
+ // initialize the struct with a given VkStructureType value while
+ // appending it to the chain.
+ //
+ // Examples:
+ // VkPhysicalDeviceFeatures2 features2 = {
+ // .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+ // .pNext = nullptr,
+ // };
+ //
+ // PNextChainBuilder featuresChain(&features2);
+ //
+ // featuresChain.Add(&featuresExtensions.subgroupSizeControl,
+ // VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+ //
+ struct PNextChainBuilder {
+ // Constructor takes the address of a Vulkan structure instance, and
+ // walks its pNext chain to record the current location of its tail.
+ //
+ // NOTE: Some VK_STRUCT_TYPEs define their pNext field as a const void*
+ // which is why the VkBaseOutStructure* casts below are necessary.
+ template <typename VK_STRUCT_TYPE>
+ explicit PNextChainBuilder(VK_STRUCT_TYPE* head)
+ : mCurrent(reinterpret_cast<VkBaseOutStructure*>(head)) {
+ // Find the end of the current chain.
+ while (mCurrent->pNext != nullptr) {
+ mCurrent = mCurrent->pNext;
+ }
+ }
+
+ // Add one item to the chain. |vkStruct| must be a Vulkan structure
+ // that is already initialized.
+ template <typename VK_STRUCT_TYPE>
+ void Add(VK_STRUCT_TYPE* vkStruct) {
+ // Sanity checks to ensure proper type safety.
+ static_assert(
+ offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
+ offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
+ "Argument type is not a proper Vulkan structure type");
+ vkStruct->pNext = nullptr;
+
+ mCurrent->pNext = reinterpret_cast<VkBaseOutStructure*>(vkStruct);
+ mCurrent = mCurrent->pNext;
+ }
+
+ // A variant of Add() above that also initializes the |sType| field in |vkStruct|.
+ template <typename VK_STRUCT_TYPE>
+ void Add(VK_STRUCT_TYPE* vkStruct, VkStructureType sType) {
+ vkStruct->sType = sType;
+ Add(vkStruct);
+ }
+
+ private:
+ VkBaseOutStructure* mCurrent;
+ };
+
VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp
index 7cd45b07d6a..f329f0851e9 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp
@@ -90,7 +90,8 @@ namespace dawn_native { namespace vulkan {
std::string message = std::string(context) + " failed with " + VkResultAsString(result);
- if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
+ if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_ERROR_OUT_OF_HOST_MEMORY ||
+ result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
return DAWN_OUT_OF_MEMORY_ERROR(message);
} else if (result == VK_ERROR_DEVICE_LOST) {
return DAWN_DEVICE_LOST_ERROR(message);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.cpp
new file mode 100644
index 00000000000..1e7f23d5e7e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.cpp
@@ -0,0 +1,320 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/vulkan/VulkanExtensions.h"
+
+#include "common/Assert.h"
+#include "common/vulkan_platform.h"
+
+#include <array>
+#include <limits>
+
+namespace dawn_native { namespace vulkan {
+
+ static constexpr uint32_t VulkanVersion_1_1 = VK_MAKE_VERSION(1, 1, 0);
+ static constexpr uint32_t VulkanVersion_1_2 = VK_MAKE_VERSION(1, 2, 0);
+ static constexpr uint32_t NeverPromoted = std::numeric_limits<uint32_t>::max();
+
+ // A static array for InstanceExtInfo that can be indexed with InstanceExts.
+ // GetInstanceExtInfo checks that "index" matches the index used to access this array so an
+ // assert will fire if it isn't in the correct order.
+ static constexpr size_t kInstanceExtCount = static_cast<size_t>(InstanceExt::EnumCount);
+ static constexpr std::array<InstanceExtInfo, kInstanceExtCount> sInstanceExtInfos{{
+ //
+ {InstanceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+ VulkanVersion_1_1},
+ {InstanceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+ VulkanVersion_1_1},
+ {InstanceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+ VulkanVersion_1_1},
+
+ {InstanceExt::Surface, "VK_KHR_surface", NeverPromoted},
+ {InstanceExt::FuchsiaImagePipeSurface, "VK_FUCHSIA_imagepipe_surface", NeverPromoted},
+ {InstanceExt::MetalSurface, "VK_EXT_metal_surface", NeverPromoted},
+ {InstanceExt::WaylandSurface, "VK_KHR_wayland_surface", NeverPromoted},
+ {InstanceExt::Win32Surface, "VK_KHR_win32_surface", NeverPromoted},
+ {InstanceExt::XcbSurface, "VK_KHR_xcb_surface", NeverPromoted},
+ {InstanceExt::XlibSurface, "VK_KHR_xlib_surface", NeverPromoted},
+
+ {InstanceExt::DebugReport, "VK_EXT_debug_report", NeverPromoted}
+ //
+ }};
+
+ void InstanceExtSet::Set(InstanceExt extension, bool enabled) {
+ extensionBitSet.set(static_cast<uint32_t>(extension), enabled);
+ }
+
+ bool InstanceExtSet::Has(InstanceExt extension) const {
+ return extensionBitSet[static_cast<uint32_t>(extension)];
+ }
+
+ const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext) {
+ uint32_t index = static_cast<uint32_t>(ext);
+ ASSERT(index < sInstanceExtInfos.size());
+ ASSERT(sInstanceExtInfos[index].index == ext);
+ return sInstanceExtInfos[index];
+ }
+
+ std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap() {
+ std::unordered_map<std::string, InstanceExt> result;
+ for (const InstanceExtInfo& info : sInstanceExtInfos) {
+ result[info.name] = info.index;
+ }
+ return result;
+ }
+
+ InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts) {
+ // We need to check that all transitive dependencies of extensions are advertised.
+ // To do that in a single pass and with no extra data structures, the extensions are
+ // topologically sorted in the definition of InstanceExt.
+ // To ensure the order is correct, we mark visited extensions in `visitedSet` and each
+ // dependency check first asserts that the dependency itself has already been visited.
+ InstanceExtSet visitedSet;
+ InstanceExtSet trimmedSet;
+
+ auto HasDep = [&](InstanceExt ext) -> bool {
+ ASSERT(visitedSet.Has(ext));
+ return trimmedSet.Has(ext);
+ };
+
+ for (uint32_t i = 0; i < sInstanceExtInfos.size(); i++) {
+ InstanceExt ext = static_cast<InstanceExt>(i);
+
+ bool hasDependencies = false;
+ switch (ext) {
+ case InstanceExt::GetPhysicalDeviceProperties2:
+ case InstanceExt::Surface:
+ case InstanceExt::DebugReport:
+ hasDependencies = true;
+ break;
+
+ case InstanceExt::ExternalMemoryCapabilities:
+ case InstanceExt::ExternalSemaphoreCapabilities:
+ hasDependencies = HasDep(InstanceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case InstanceExt::FuchsiaImagePipeSurface:
+ case InstanceExt::MetalSurface:
+ case InstanceExt::WaylandSurface:
+ case InstanceExt::Win32Surface:
+ case InstanceExt::XcbSurface:
+ case InstanceExt::XlibSurface:
+ hasDependencies = HasDep(InstanceExt::Surface);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ trimmedSet.Set(ext, hasDependencies && advertisedExts.Has(ext));
+ visitedSet.Set(ext, true);
+ }
+
+ return trimmedSet;
+ }
+
+ void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version) {
+ for (const InstanceExtInfo& info : sInstanceExtInfos) {
+ if (info.versionPromoted <= version) {
+ extensions->Set(info.index, true);
+ }
+ }
+ }
+
+ static constexpr size_t kDeviceExtCount = static_cast<size_t>(DeviceExt::EnumCount);
+ static constexpr std::array<DeviceExtInfo, kDeviceExtCount> sDeviceExtInfos{{
+ //
+ {DeviceExt::BindMemory2, "VK_KHR_bind_memory2", VulkanVersion_1_1},
+ {DeviceExt::Maintenance1, "VK_KHR_maintenance1", VulkanVersion_1_1},
+ {DeviceExt::StorageBufferStorageClass, "VK_KHR_storage_buffer_storage_class",
+ VulkanVersion_1_1},
+ {DeviceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+ VulkanVersion_1_1},
+ {DeviceExt::GetMemoryRequirements2, "VK_KHR_get_memory_requirements2", VulkanVersion_1_1},
+ {DeviceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+ VulkanVersion_1_1},
+ {DeviceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+ VulkanVersion_1_1},
+ {DeviceExt::ExternalMemory, "VK_KHR_external_memory", VulkanVersion_1_1},
+ {DeviceExt::ExternalSemaphore, "VK_KHR_external_semaphore", VulkanVersion_1_1},
+ {DeviceExt::_16BitStorage, "VK_KHR_16bit_storage", VulkanVersion_1_1},
+ {DeviceExt::SamplerYCbCrConversion, "VK_KHR_sampler_ycbcr_conversion", VulkanVersion_1_1},
+
+ {DeviceExt::ImageFormatList, "VK_KHR_image_format_list", VulkanVersion_1_2},
+ {DeviceExt::ShaderFloat16Int8, "VK_KHR_shader_float16_int8", VulkanVersion_1_2},
+
+ {DeviceExt::ExternalMemoryFD, "VK_KHR_external_memory_fd", NeverPromoted},
+ {DeviceExt::ExternalMemoryDmaBuf, "VK_EXT_external_memory_dma_buf", NeverPromoted},
+ {DeviceExt::ExternalMemoryZirconHandle, "VK_FUCHSIA_external_memory", NeverPromoted},
+ {DeviceExt::ExternalSemaphoreFD, "VK_KHR_external_semaphore_fd", NeverPromoted},
+ {DeviceExt::ExternalSemaphoreZirconHandle, "VK_FUCHSIA_external_semaphore", NeverPromoted},
+
+ {DeviceExt::DebugMarker, "VK_EXT_debug_marker", NeverPromoted},
+ {DeviceExt::ImageDrmFormatModifier, "VK_EXT_image_drm_format_modifier", NeverPromoted},
+ {DeviceExt::Swapchain, "VK_KHR_swapchain", NeverPromoted},
+ {DeviceExt::SubgroupSizeControl, "VK_EXT_subgroup_size_control", NeverPromoted},
+ //
+ }};
+
+ void DeviceExtSet::Set(DeviceExt extension, bool enabled) {
+ extensionBitSet.set(static_cast<uint32_t>(extension), enabled);
+ }
+
+ bool DeviceExtSet::Has(DeviceExt extension) const {
+ return extensionBitSet[static_cast<uint32_t>(extension)];
+ }
+
+ const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext) {
+ uint32_t index = static_cast<uint32_t>(ext);
+ ASSERT(index < sDeviceExtInfos.size());
+ ASSERT(sDeviceExtInfos[index].index == ext);
+ return sDeviceExtInfos[index];
+ }
+
+ std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap() {
+ std::unordered_map<std::string, DeviceExt> result;
+ for (const DeviceExtInfo& info : sDeviceExtInfos) {
+ result[info.name] = info.index;
+ }
+ return result;
+ }
+
+ DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+ const InstanceExtSet& instanceExts,
+ uint32_t icdVersion) {
+ // This is very similar to EnsureDependencies for InstanceExtSet above. See the comment
+ // there for an explanation of what happens.
+ DeviceExtSet visitedSet;
+ DeviceExtSet trimmedSet;
+
+ auto HasDep = [&](DeviceExt ext) -> bool {
+ ASSERT(visitedSet.Has(ext));
+ return trimmedSet.Has(ext);
+ };
+
+ for (uint32_t i = 0; i < sDeviceExtInfos.size(); i++) {
+ DeviceExt ext = static_cast<DeviceExt>(i);
+
+ bool hasDependencies = false;
+ switch (ext) {
+ // Happy extensions don't need anybody else!
+ case DeviceExt::BindMemory2:
+ case DeviceExt::GetMemoryRequirements2:
+ case DeviceExt::Maintenance1:
+ case DeviceExt::ImageFormatList:
+ case DeviceExt::StorageBufferStorageClass:
+ hasDependencies = true;
+ break;
+
+ // Physical device extensions technically don't require the instance to support
+ // them but VulkanFunctions only loads the function pointers if the instance
+ // advertises the extension. So if we didn't have this check, we'd risk calling
+ // a nullptr.
+ case DeviceExt::GetPhysicalDeviceProperties2:
+ hasDependencies = instanceExts.Has(InstanceExt::GetPhysicalDeviceProperties2);
+ break;
+ case DeviceExt::ExternalMemoryCapabilities:
+ hasDependencies = instanceExts.Has(InstanceExt::ExternalMemoryCapabilities) &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+ case DeviceExt::ExternalSemaphoreCapabilities:
+ hasDependencies =
+ instanceExts.Has(InstanceExt::ExternalSemaphoreCapabilities) &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::DebugMarker:
+ // TODO(cwallez@chromium.org): VK_EXT_debug_report is deprecated, switch to
+ // using VK_EXT_debug_utils instead.
+ hasDependencies = instanceExts.Has(InstanceExt::DebugReport);
+ break;
+
+ case DeviceExt::ImageDrmFormatModifier:
+ hasDependencies = HasDep(DeviceExt::BindMemory2) &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+ HasDep(DeviceExt::ImageFormatList) &&
+ HasDep(DeviceExt::SamplerYCbCrConversion);
+ break;
+
+ case DeviceExt::Swapchain:
+ hasDependencies = instanceExts.Has(InstanceExt::Surface);
+ break;
+
+ case DeviceExt::SamplerYCbCrConversion:
+ hasDependencies = HasDep(DeviceExt::Maintenance1) &&
+ HasDep(DeviceExt::BindMemory2) &&
+ HasDep(DeviceExt::GetMemoryRequirements2) &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::ShaderFloat16Int8:
+ hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::ExternalMemory:
+ hasDependencies = HasDep(DeviceExt::ExternalMemoryCapabilities);
+ break;
+
+ case DeviceExt::ExternalSemaphore:
+ hasDependencies = HasDep(DeviceExt::ExternalSemaphoreCapabilities);
+ break;
+
+ case DeviceExt::ExternalMemoryFD:
+ case DeviceExt::ExternalMemoryZirconHandle:
+ hasDependencies = HasDep(DeviceExt::ExternalMemory);
+ break;
+
+ case DeviceExt::ExternalMemoryDmaBuf:
+ hasDependencies = HasDep(DeviceExt::ExternalMemoryFD);
+ break;
+
+ case DeviceExt::ExternalSemaphoreFD:
+ case DeviceExt::ExternalSemaphoreZirconHandle:
+ hasDependencies = HasDep(DeviceExt::ExternalSemaphore);
+ break;
+
+ case DeviceExt::_16BitStorage:
+ hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+ HasDep(DeviceExt::StorageBufferStorageClass);
+ break;
+
+ case DeviceExt::SubgroupSizeControl:
+ // Using the extension requires DeviceExt::GetPhysicalDeviceProperties2, but we
+ // don't need to check for it explicitly: the extension also requires Vulkan 1.1,
+ // in which VK_KHR_get_physical_device_properties2 was promoted to core.
+ hasDependencies = icdVersion >= VulkanVersion_1_1;
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ trimmedSet.Set(ext, hasDependencies && advertisedExts.Has(ext));
+ visitedSet.Set(ext, true);
+ }
+
+ return trimmedSet;
+ }
+
+ void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version) {
+ for (const DeviceExtInfo& info : sDeviceExtInfos) {
+ if (info.versionPromoted <= version) {
+ extensions->Set(info.index, true);
+ }
+ }
+ }
+
+}} // namespace dawn_native::vulkan
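A minimal standalone sketch of the single-pass trimming idea used by EnsureDependencies above (toy three-extension enum, not Dawn code): because the extensions are declared in dependency order, each extension's dependencies have always been processed by the time it is visited, so one forward pass over two bitsets suffices.

    #include <bitset>
    #include <cassert>

    enum class Ext { A, B, C, EnumCount };  // B depends on A; C depends on B.

    std::bitset<3> TrimUnsupported(const std::bitset<3>& advertised) {
        std::bitset<3> visited;
        std::bitset<3> kept;
        auto HasDep = [&](Ext e) -> bool {
            // Topological order guarantees the dependency was already processed.
            assert(visited[static_cast<size_t>(e)]);
            return kept[static_cast<size_t>(e)];
        };
        for (size_t i = 0; i < static_cast<size_t>(Ext::EnumCount); i++) {
            Ext e = static_cast<Ext>(i);
            bool depsSatisfied = (e == Ext::A)   ? true
                                 : (e == Ext::B) ? HasDep(Ext::A)
                                                 : HasDep(Ext::B);
            kept[i] = depsSatisfied && advertised[i];
            visited[i] = true;
        }
        return kept;  // advertised = {A, C} keeps only A: C's chain through B is broken.
    }

Feeding each HasDep check from the trimmed set rather than the advertised set is what makes dropping one extension drop everything downstream of it in the same pass.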
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.h
new file mode 100644
index 00000000000..ba6abc2c0af
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.h
@@ -0,0 +1,141 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
+#define DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
+
+#include <bitset>
+#include <unordered_map>
+
+namespace dawn_native { namespace vulkan {
+
+ // The list of known instance extensions. They must be in dependency order (this is checked
+ // inside EnsureDependencies).
+ enum class InstanceExt {
+ // Promoted to 1.1
+ GetPhysicalDeviceProperties2,
+ ExternalMemoryCapabilities,
+ ExternalSemaphoreCapabilities,
+
+ // Surface extensions
+ Surface,
+ FuchsiaImagePipeSurface,
+ MetalSurface,
+ WaylandSurface,
+ Win32Surface,
+ XcbSurface,
+ XlibSurface,
+
+ // Others
+ DebugReport,
+
+ EnumCount,
+ };
+
+ // A bitset wrapper that is indexed with InstanceExt.
+ struct InstanceExtSet {
+ std::bitset<static_cast<size_t>(InstanceExt::EnumCount)> extensionBitSet;
+ void Set(InstanceExt extension, bool enabled);
+ bool Has(InstanceExt extension) const;
+ };
+
+ // Information about a known instance extension.
+ struct InstanceExtInfo {
+ InstanceExt index;
+ const char* name;
+ // The version in which this extension was promoted as built with VK_MAKE_VERSION,
+ // or NeverPromoted if it was never promoted.
+ uint32_t versionPromoted;
+ };
+
+ // Returns the information about a known InstanceExt
+ const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext);
+ // Returns a map that maps a Vulkan extension name to its InstanceExt.
+ std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap();
+
+ // Sets entries in `extensions` to true if that extension was promoted to core in Vulkan
+ // version `version` or earlier.
+ void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version);
+ // From a set of extensions advertised as supported by the instance (or promoted), remove all
+ // extensions that don't have all their transitive dependencies in advertisedExts.
+ InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts);
+
+ // The list of known device extensions. They must be in dependency order (this is checked
+ // inside EnsureDependencies).
+ enum class DeviceExt {
+ // Promoted to 1.1
+ BindMemory2,
+ Maintenance1,
+ StorageBufferStorageClass,
+ GetPhysicalDeviceProperties2,
+ GetMemoryRequirements2,
+ ExternalMemoryCapabilities,
+ ExternalSemaphoreCapabilities,
+ ExternalMemory,
+ ExternalSemaphore,
+ _16BitStorage,
+ SamplerYCbCrConversion,
+
+ // Promoted to 1.2
+ ImageFormatList,
+ ShaderFloat16Int8,
+
+ // External* extensions
+ ExternalMemoryFD,
+ ExternalMemoryDmaBuf,
+ ExternalMemoryZirconHandle,
+ ExternalSemaphoreFD,
+ ExternalSemaphoreZirconHandle,
+
+ // Others
+ DebugMarker,
+ ImageDrmFormatModifier,
+ Swapchain,
+ SubgroupSizeControl,
+
+ EnumCount,
+ };
+
+ // A bitset wrapper that is indexed with DeviceExt.
+ struct DeviceExtSet {
+ std::bitset<static_cast<size_t>(DeviceExt::EnumCount)> extensionBitSet;
+ void Set(DeviceExt extension, bool enabled);
+ bool Has(DeviceExt extension) const;
+ };
+
+ // Information about a known device extension.
+ struct DeviceExtInfo {
+ DeviceExt index;
+ const char* name;
+ // The version in which this extension was promoted as built with VK_MAKE_VERSION,
+ // or NeverPromoted if it was never promoted.
+ uint32_t versionPromoted;
+ };
+
+ // Returns the information about a known DeviceExt
+ const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext);
+ // Returns a map that maps a Vulkan extension name to its DeviceExt.
+ std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap();
+
+ // Sets entries in `extensions` to true if that extension was promoted to core in Vulkan
+ // version `version` or earlier.
+ void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version);
+ // From a set of extensions advertised as supported by the device (or promoted), remove all
+ // extensions that don't have all their transitive dependencies in advertisedExts or in
+ // instanceExts.
+ DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+ const InstanceExtSet& instanceExts,
+ uint32_t icdVersion);
+
+}} // namespace dawn_native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
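A usage sketch of how the declarations in this header compose when gathering instance info; `enumeratedNames` is a stand-in for the names returned by vkEnumerateInstanceExtensionProperties and is not part of the header:

    InstanceExtSet exts;
    std::unordered_map<std::string, InstanceExt> known = CreateInstanceExtNameMap();
    for (const std::string& name : enumeratedNames) {
        auto it = known.find(name);
        if (it != known.end()) {
            exts.Set(it->second, true);  // silently skip extensions Dawn doesn't know
        }
    }
    // Extensions promoted to core count as advertised; then anything with a
    // broken dependency chain is dropped.
    MarkPromotedExtensions(&exts, VK_MAKE_VERSION(1, 1, 0));
    exts = EnsureDependencies(exts);
    bool canUseSurfaces = exts.Has(InstanceExt::Surface);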
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
index 6099c0a3215..cc070f236c2 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
@@ -74,7 +74,7 @@ namespace dawn_native { namespace vulkan {
GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties);
GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties);
- if (globalInfo.debugReport) {
+ if (globalInfo.HasExt(InstanceExt::DebugReport)) {
GET_INSTANCE_PROC(CreateDebugReportCallbackEXT);
GET_INSTANCE_PROC(DebugReportMessageEXT);
GET_INSTANCE_PROC(DestroyDebugReportCallbackEXT);
@@ -84,13 +84,13 @@ namespace dawn_native { namespace vulkan {
// support the vendor entrypoint in GetProcAddress.
if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferProperties);
- } else if (globalInfo.externalMemoryCapabilities) {
+ } else if (globalInfo.HasExt(InstanceExt::ExternalMemoryCapabilities)) {
GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalBufferProperties, KHR);
}
if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
- } else if (globalInfo.externalSemaphoreCapabilities) {
+ } else if (globalInfo.HasExt(InstanceExt::ExternalSemaphoreCapabilities)) {
GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalSemaphoreProperties, KHR);
}
@@ -102,7 +102,7 @@ namespace dawn_native { namespace vulkan {
GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2);
GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2);
GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2);
- } else if (globalInfo.getPhysicalDeviceProperties2) {
+ } else if (globalInfo.HasExt(InstanceExt::GetPhysicalDeviceProperties2)) {
GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFeatures2, KHR);
GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceProperties2, KHR);
GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFormatProperties2, KHR);
@@ -112,7 +112,7 @@ namespace dawn_native { namespace vulkan {
GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceSparseImageFormatProperties2, KHR);
}
- if (globalInfo.surface) {
+ if (globalInfo.HasExt(InstanceExt::Surface)) {
GET_INSTANCE_PROC(DestroySurfaceKHR);
GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceSupportKHR);
GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
@@ -121,26 +121,26 @@ namespace dawn_native { namespace vulkan {
}
#if defined(VK_USE_PLATFORM_FUCHSIA)
- if (globalInfo.fuchsiaImagePipeSurface) {
+ if (globalInfo.HasExt(InstanceExt::FuchsiaImagePipeSurface)) {
GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
}
#endif // defined(VK_USE_PLATFORM_FUCHSIA)
#if defined(DAWN_ENABLE_BACKEND_METAL)
- if (globalInfo.metalSurface) {
+ if (globalInfo.HasExt(InstanceExt::MetalSurface)) {
GET_INSTANCE_PROC(CreateMetalSurfaceEXT);
}
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
#if defined(DAWN_PLATFORM_WINDOWS)
- if (globalInfo.win32Surface) {
+ if (globalInfo.HasExt(InstanceExt::Win32Surface)) {
GET_INSTANCE_PROC(CreateWin32SurfaceKHR);
GET_INSTANCE_PROC(GetPhysicalDeviceWin32PresentationSupportKHR);
}
#endif // defined(DAWN_PLATFORM_WINDOWS)
#if defined(DAWN_USE_X11)
- if (globalInfo.xlibSurface) {
+ if (globalInfo.HasExt(InstanceExt::XlibSurface)) {
GET_INSTANCE_PROC(CreateXlibSurfaceKHR);
GET_INSTANCE_PROC(GetPhysicalDeviceXlibPresentationSupportKHR);
}
@@ -278,35 +278,35 @@ namespace dawn_native { namespace vulkan {
GET_DEVICE_PROC(UpdateDescriptorSets);
GET_DEVICE_PROC(WaitForFences);
- if (deviceInfo.debugMarker) {
+ if (deviceInfo.HasExt(DeviceExt::DebugMarker)) {
GET_DEVICE_PROC(CmdDebugMarkerBeginEXT);
GET_DEVICE_PROC(CmdDebugMarkerEndEXT);
GET_DEVICE_PROC(CmdDebugMarkerInsertEXT);
}
- if (deviceInfo.externalMemoryFD) {
+ if (deviceInfo.HasExt(DeviceExt::ExternalMemoryFD)) {
GET_DEVICE_PROC(GetMemoryFdKHR);
GET_DEVICE_PROC(GetMemoryFdPropertiesKHR);
}
- if (deviceInfo.externalSemaphoreFD) {
+ if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
GET_DEVICE_PROC(ImportSemaphoreFdKHR);
GET_DEVICE_PROC(GetSemaphoreFdKHR);
}
#if VK_USE_PLATFORM_FUCHSIA
- if (deviceInfo.externalMemoryZirconHandle) {
+ if (deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle)) {
GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
}
- if (deviceInfo.externalSemaphoreZirconHandle) {
+ if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
}
#endif
- if (deviceInfo.swapchain) {
+ if (deviceInfo.HasExt(DeviceExt::Swapchain)) {
GET_DEVICE_PROC(CreateSwapchainKHR);
GET_DEVICE_PROC(DestroySwapchainKHR);
GET_DEVICE_PROC(GetSwapchainImagesKHR);
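For reference, the GET_INSTANCE_PROC / GET_INSTANCE_PROC_VENDOR pair used above amounts to the following, sketched without the macros (whose bodies aren't shown in this diff; `fn` stands for the VulkanFunctions table). The vendor form resolves the suffixed pre-promotion entry point into the same function pointer, so callers never care which spelling the driver exposed:

    // Core Vulkan 1.1 spelling:
    fn.GetPhysicalDeviceProperties2 = reinterpret_cast<PFN_vkGetPhysicalDeviceProperties2>(
        fn.GetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties2"));
    // Pre-1.1, via VK_KHR_get_physical_device_properties2:
    fn.GetPhysicalDeviceProperties2 = reinterpret_cast<PFN_vkGetPhysicalDeviceProperties2>(
        fn.GetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties2KHR"));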
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
index 2a3226245ba..5a66c678fb2 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
@@ -17,6 +17,7 @@
#include "common/Log.h"
#include "dawn_native/vulkan/AdapterVk.h"
#include "dawn_native/vulkan/BackendVk.h"
+#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"
#include <cstring>
@@ -28,10 +29,6 @@ namespace dawn_native { namespace vulkan {
return strncmp(layer.layerName, name, VK_MAX_EXTENSION_NAME_SIZE) == 0;
}
- bool IsExtensionName(const VkExtensionProperties& extension, const char* name) {
- return strncmp(extension.extensionName, name, VK_MAX_EXTENSION_NAME_SIZE) == 0;
- }
-
bool EnumerateInstanceExtensions(const char* layerName,
const dawn_native::vulkan::VulkanFunctions& vkFunctions,
std::vector<VkExtensionProperties>* extensions) {
@@ -54,36 +51,31 @@ namespace dawn_native { namespace vulkan {
const char kLayerNameRenderDocCapture[] = "VK_LAYER_RENDERDOC_Capture";
const char kLayerNameFuchsiaImagePipeSwapchain[] = "VK_LAYER_FUCHSIA_imagepipe_swapchain";
- const char kExtensionNameExtDebugMarker[] = "VK_EXT_debug_marker";
- const char kExtensionNameExtDebugReport[] = "VK_EXT_debug_report";
- const char kExtensionNameExtMetalSurface[] = "VK_EXT_metal_surface";
- const char kExtensionNameKhrExternalMemory[] = "VK_KHR_external_memory";
- const char kExtensionNameKhrExternalMemoryCapabilities[] =
- "VK_KHR_external_memory_capabilities";
- const char kExtensionNameKhrExternalMemoryFD[] = "VK_KHR_external_memory_fd";
- const char kExtensionNameExtExternalMemoryDmaBuf[] = "VK_EXT_external_memory_dma_buf";
- const char kExtensionNameExtImageDrmFormatModifier[] = "VK_EXT_image_drm_format_modifier";
- const char kExtensionNameFuchsiaExternalMemory[] = "VK_FUCHSIA_external_memory";
- const char kExtensionNameKhrExternalSemaphore[] = "VK_KHR_external_semaphore";
- const char kExtensionNameKhrExternalSemaphoreCapabilities[] =
- "VK_KHR_external_semaphore_capabilities";
- const char kExtensionNameKhrExternalSemaphoreFD[] = "VK_KHR_external_semaphore_fd";
- const char kExtensionNameFuchsiaExternalSemaphore[] = "VK_FUCHSIA_external_semaphore";
- const char kExtensionNameKhrGetPhysicalDeviceProperties2[] =
- "VK_KHR_get_physical_device_properties2";
- const char kExtensionNameKhrSurface[] = "VK_KHR_surface";
- const char kExtensionNameKhrSwapchain[] = "VK_KHR_swapchain";
- const char kExtensionNameKhrWaylandSurface[] = "VK_KHR_wayland_surface";
- const char kExtensionNameKhrWin32Surface[] = "VK_KHR_win32_surface";
- const char kExtensionNameKhrXcbSurface[] = "VK_KHR_xcb_surface";
- const char kExtensionNameKhrXlibSurface[] = "VK_KHR_xlib_surface";
- const char kExtensionNameFuchsiaImagePipeSurface[] = "VK_FUCHSIA_imagepipe_surface";
- const char kExtensionNameKhrMaintenance1[] = "VK_KHR_maintenance1";
+ bool VulkanGlobalKnobs::HasExt(InstanceExt ext) const {
+ return extensions.Has(ext);
+ }
+
+ bool VulkanDeviceKnobs::HasExt(DeviceExt ext) const {
+ return extensions.Has(ext);
+ }
ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const Backend& backend) {
VulkanGlobalInfo info = {};
const VulkanFunctions& vkFunctions = backend.GetFunctions();
+ // Gather info on available API version
+ {
+ uint32_t supportedAPIVersion = VK_MAKE_VERSION(1, 0, 0);
+ if (vkFunctions.EnumerateInstanceVersion) {
+ vkFunctions.EnumerateInstanceVersion(&supportedAPIVersion);
+ }
+
+ // Use Vulkan 1.1 if it's available.
+ info.apiVersion = (supportedAPIVersion >= VK_MAKE_VERSION(1, 1, 0))
+ ? VK_MAKE_VERSION(1, 1, 0)
+ : VK_MAKE_VERSION(1, 0, 0);
+ }
+
// Gather the info about the instance layers
{
uint32_t count = 0;
@@ -122,76 +114,40 @@ namespace dawn_native { namespace vulkan {
// Gather the info about the instance extensions
{
- if (!EnumerateInstanceExtensions(nullptr, vkFunctions, &info.extensions)) {
+ std::unordered_map<std::string, InstanceExt> knownExts = CreateInstanceExtNameMap();
+
+ std::vector<VkExtensionProperties> extensionsProperties;
+ if (!EnumerateInstanceExtensions(nullptr, vkFunctions, &extensionsProperties)) {
return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
}
- for (const auto& extension : info.extensions) {
- if (IsExtensionName(extension, kExtensionNameExtDebugReport)) {
- info.debugReport = true;
- }
- if (IsExtensionName(extension, kExtensionNameExtMetalSurface)) {
- info.metalSurface = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrExternalMemoryCapabilities)) {
- info.externalMemoryCapabilities = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrExternalSemaphoreCapabilities)) {
- info.externalSemaphoreCapabilities = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrGetPhysicalDeviceProperties2)) {
- info.getPhysicalDeviceProperties2 = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrSurface)) {
- info.surface = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrWaylandSurface)) {
- info.waylandSurface = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrWin32Surface)) {
- info.win32Surface = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrXcbSurface)) {
- info.xcbSurface = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrXlibSurface)) {
- info.xlibSurface = true;
- }
- if (IsExtensionName(extension, kExtensionNameFuchsiaImagePipeSurface)) {
- info.fuchsiaImagePipeSurface = true;
+ for (const VkExtensionProperties& extension : extensionsProperties) {
+ auto it = knownExts.find(extension.extensionName);
+ if (it != knownExts.end()) {
+ info.extensions.Set(it->second, true);
}
}
- }
- // Specific handling for the Fuchsia swapchain surface creation extension
- // which is normally part of the Fuchsia-specific swapchain layer.
- if (info.fuchsiaImagePipeSwapchain && !info.fuchsiaImagePipeSurface) {
- std::vector<VkExtensionProperties> layer_extensions;
- if (!EnumerateInstanceExtensions(kLayerNameFuchsiaImagePipeSwapchain, vkFunctions,
- &layer_extensions)) {
- return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
- }
-
- for (const auto& extension : layer_extensions) {
- if (IsExtensionName(extension, kExtensionNameFuchsiaImagePipeSurface)) {
- info.fuchsiaImagePipeSurface = true;
- // For now, copy this to the global extension list.
- info.extensions.push_back(extension);
+ // Specific handling for the Fuchsia swapchain surface creation extension
+ // which is normally part of the Fuchsia-specific swapchain layer.
+ if (info.fuchsiaImagePipeSwapchain &&
+ !info.HasExt(InstanceExt::FuchsiaImagePipeSurface)) {
+ if (!EnumerateInstanceExtensions(kLayerNameFuchsiaImagePipeSwapchain, vkFunctions,
+ &extensionsProperties)) {
+ return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
}
- }
- }
- // Gather info on available API version
- {
- uint32_t supportedAPIVersion = VK_MAKE_VERSION(1, 0, 0);
- if (vkFunctions.EnumerateInstanceVersion) {
- vkFunctions.EnumerateInstanceVersion(&supportedAPIVersion);
+ for (const VkExtensionProperties& extension : extensionsProperties) {
+ auto it = knownExts.find(extension.extensionName);
+ if (it != knownExts.end() &&
+ it->second == InstanceExt::FuchsiaImagePipeSurface) {
+ info.extensions.Set(InstanceExt::FuchsiaImagePipeSurface, true);
+ }
+ }
}
- // Use Vulkan 1.1 if it's available.
- info.apiVersion = (supportedAPIVersion >= VK_MAKE_VERSION(1, 1, 0))
- ? VK_MAKE_VERSION(1, 1, 0)
- : VK_MAKE_VERSION(1, 0, 0);
+ MarkPromotedExtensions(&info.extensions, info.apiVersion);
+ info.extensions = EnsureDependencies(info.extensions);
}
// TODO(cwallez@chromium.org): Each layer can expose additional extensions, query them?
@@ -221,11 +177,11 @@ namespace dawn_native { namespace vulkan {
ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
VulkanDeviceInfo info = {};
VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
+ const VulkanGlobalInfo& globalInfo = adapter.GetBackend()->GetGlobalInfo();
const VulkanFunctions& vkFunctions = adapter.GetBackend()->GetFunctions();
- // Gather general info about the device
+ // Query the device properties first to get the ICD's `apiVersion`
vkFunctions.GetPhysicalDeviceProperties(physicalDevice, &info.properties);
- vkFunctions.GetPhysicalDeviceFeatures(physicalDevice, &info.features);
// Gather info about device memory.
{
@@ -272,51 +228,71 @@ namespace dawn_native { namespace vulkan {
return DAWN_INTERNAL_ERROR("vkEnumerateDeviceExtensionProperties");
}
- info.extensions.resize(count);
- DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
- physicalDevice, nullptr, &count, info.extensions.data()),
- "vkEnumerateDeviceExtensionProperties"));
+ std::vector<VkExtensionProperties> extensionsProperties;
+ extensionsProperties.resize(count);
+ DAWN_TRY(
+ CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
+ physicalDevice, nullptr, &count, extensionsProperties.data()),
+ "vkEnumerateDeviceExtensionProperties"));
- for (const auto& extension : info.extensions) {
- if (IsExtensionName(extension, kExtensionNameExtDebugMarker)) {
- info.debugMarker = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrExternalMemory)) {
- info.externalMemory = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrExternalMemoryFD)) {
- info.externalMemoryFD = true;
- }
- if (IsExtensionName(extension, kExtensionNameExtExternalMemoryDmaBuf)) {
- info.externalMemoryDmaBuf = true;
- }
- if (IsExtensionName(extension, kExtensionNameExtImageDrmFormatModifier)) {
- info.imageDrmFormatModifier = true;
- }
- if (IsExtensionName(extension, kExtensionNameFuchsiaExternalMemory)) {
- info.externalMemoryZirconHandle = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrExternalSemaphore)) {
- info.externalSemaphore = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrExternalSemaphoreFD)) {
- info.externalSemaphoreFD = true;
- }
- if (IsExtensionName(extension, kExtensionNameFuchsiaExternalSemaphore)) {
- info.externalSemaphoreZirconHandle = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrSwapchain)) {
- info.swapchain = true;
- }
- if (IsExtensionName(extension, kExtensionNameKhrMaintenance1)) {
- info.maintenance1 = true;
+ std::unordered_map<std::string, DeviceExt> knownExts = CreateDeviceExtNameMap();
+
+ for (const VkExtensionProperties& extension : extensionsProperties) {
+ auto it = knownExts.find(extension.extensionName);
+ if (it != knownExts.end()) {
+ info.extensions.Set(it->second, true);
}
}
+
+ MarkPromotedExtensions(&info.extensions, info.properties.apiVersion);
+ info.extensions = EnsureDependencies(info.extensions, globalInfo.extensions,
+ info.properties.apiVersion);
+ }
+
+ // Gather general and extension features and properties
+ //
+ // Use vkGetPhysicalDevice{Features,Properties}2 if required to gather information about
+ // the extensions. DeviceExt::GetPhysicalDeviceProperties2 is guaranteed to be available
+ // because these extensions (transitively) depend on it in `EnsureDependencies`.
+ VkPhysicalDeviceFeatures2 features2 = {};
+ features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+ PNextChainBuilder featuresChain(&features2);
+
+ VkPhysicalDeviceProperties2 properties2 = {};
+ properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
+ PNextChainBuilder propertiesChain(&properties2);
+
+ if (info.extensions.Has(DeviceExt::ShaderFloat16Int8)) {
+ featuresChain.Add(&info.shaderFloat16Int8Features,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+ }
+
+ if (info.extensions.Has(DeviceExt::_16BitStorage)) {
+ featuresChain.Add(&info._16BitStorageFeatures,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+ }
+
+ if (info.extensions.Has(DeviceExt::SubgroupSizeControl)) {
+ featuresChain.Add(&info.subgroupSizeControlFeatures,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+ propertiesChain.Add(
+ &info.subgroupSizeControlProperties,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT);
}
- // Mark the extensions promoted to Vulkan 1.1 as available.
- if (info.properties.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- info.maintenance1 = true;
+ // If we have DeviceExt::GetPhysicalDeviceProperties2, use features2 and properties2 so
+ // that features not covered by VkPhysicalDevice{Features,Properties} can be queried.
+ //
+ // Note that info.properties has already been filled at the start of this function to get
+ // `apiVersion`.
+ ASSERT(info.properties.apiVersion != 0);
+ if (info.extensions.Has(DeviceExt::GetPhysicalDeviceProperties2)) {
+ vkFunctions.GetPhysicalDeviceProperties2(physicalDevice, &properties2);
+ vkFunctions.GetPhysicalDeviceFeatures2(physicalDevice, &features2);
+ info.features = features2.features;
+ } else {
+ ASSERT(features2.pNext == nullptr && properties2.pNext == nullptr);
+ vkFunctions.GetPhysicalDeviceFeatures(physicalDevice, &info.features);
}
// TODO(cwallez@chromium.org): gather info about formats
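For reference, the PNextChainBuilder calls above boil down to ordinary Vulkan pNext chaining; a plain-Vulkan sketch (the builder, from UtilsVulkan.h, presumably just sets each struct's sType and links the pNext pointers):

    VkPhysicalDeviceShaderFloat16Int8FeaturesKHR fp16Int8 = {};
    fp16Int8.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR;

    VkPhysicalDeviceFeatures2 features2 = {};
    features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features2.pNext = &fp16Int8;  // chain the extension struct onto the query

    vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
    // One call fills both: features2.features holds the core feature bits,
    // fp16Int8 the extension's.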
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
index 354d9b38c96..e0512475156 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
@@ -17,6 +17,7 @@
#include "common/vulkan_platform.h"
#include "dawn_native/Error.h"
+#include "dawn_native/vulkan/VulkanExtensions.h"
#include <vector>
@@ -30,29 +31,6 @@ namespace dawn_native { namespace vulkan {
extern const char kLayerNameRenderDocCapture[];
extern const char kLayerNameFuchsiaImagePipeSwapchain[];
- extern const char kExtensionNameExtDebugMarker[];
- extern const char kExtensionNameExtDebugReport[];
- extern const char kExtensionNameExtMetalSurface[];
- extern const char kExtensionNameKhrExternalMemory[];
- extern const char kExtensionNameKhrExternalMemoryCapabilities[];
- extern const char kExtensionNameKhrExternalMemoryFD[];
- extern const char kExtensionNameExtExternalMemoryDmaBuf[];
- extern const char kExtensionNameExtImageDrmFormatModifier[];
- extern const char kExtensionNameFuchsiaExternalMemory[];
- extern const char kExtensionNameKhrExternalSemaphore[];
- extern const char kExtensionNameKhrExternalSemaphoreCapabilities[];
- extern const char kExtensionNameKhrExternalSemaphoreFD[];
- extern const char kExtensionNameFuchsiaExternalSemaphore[];
- extern const char kExtensionNameKhrGetPhysicalDeviceProperties2[];
- extern const char kExtensionNameKhrSurface[];
- extern const char kExtensionNameKhrSwapchain[];
- extern const char kExtensionNameKhrWaylandSurface[];
- extern const char kExtensionNameKhrWin32Surface[];
- extern const char kExtensionNameKhrXcbSurface[];
- extern const char kExtensionNameKhrXlibSurface[];
- extern const char kExtensionNameFuchsiaImagePipeSurface[];
- extern const char kExtensionNameKhrMaintenance1[];
-
// Global information - gathered before the instance is created
struct VulkanGlobalKnobs {
// Layers
@@ -61,23 +39,12 @@ namespace dawn_native { namespace vulkan {
bool renderDocCapture = false;
bool fuchsiaImagePipeSwapchain = false;
- // Extensions
- bool debugReport = false;
- bool externalMemoryCapabilities = false;
- bool externalSemaphoreCapabilities = false;
- bool getPhysicalDeviceProperties2 = false;
- bool metalSurface = false;
- bool surface = false;
- bool waylandSurface = false;
- bool win32Surface = false;
- bool xcbSurface = false;
- bool xlibSurface = false;
- bool fuchsiaImagePipeSurface = false;
+ bool HasExt(InstanceExt ext) const;
+ InstanceExtSet extensions;
};
struct VulkanGlobalInfo : VulkanGlobalKnobs {
std::vector<VkLayerProperties> layers;
- std::vector<VkExtensionProperties> extensions;
uint32_t apiVersion;
// TODO(cwallez@chromium.org): layer instance extensions
};
@@ -85,30 +52,24 @@ namespace dawn_native { namespace vulkan {
// Device information - gathered before the device is created.
struct VulkanDeviceKnobs {
VkPhysicalDeviceFeatures features;
+ VkPhysicalDeviceShaderFloat16Int8FeaturesKHR shaderFloat16Int8Features;
+ VkPhysicalDevice16BitStorageFeaturesKHR _16BitStorageFeatures;
+ VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- // Extensions, promoted extensions are set to true if their core version is supported.
- bool debugMarker = false;
- bool externalMemory = false;
- bool externalMemoryFD = false;
- bool externalMemoryDmaBuf = false;
- bool imageDrmFormatModifier = false;
- bool externalMemoryZirconHandle = false;
- bool externalSemaphore = false;
- bool externalSemaphoreFD = false;
- bool externalSemaphoreZirconHandle = false;
- bool swapchain = false;
- bool maintenance1 = false;
+ bool HasExt(DeviceExt ext) const;
+ DeviceExtSet extensions;
};
struct VulkanDeviceInfo : VulkanDeviceKnobs {
VkPhysicalDeviceProperties properties;
+ VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
+
std::vector<VkQueueFamilyProperties> queueFamilies;
std::vector<VkMemoryType> memoryTypes;
std::vector<VkMemoryHeap> memoryHeaps;
std::vector<VkLayerProperties> layers;
- std::vector<VkExtensionProperties> extensions;
// TODO(cwallez@chromium.org): layer instance extensions
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
index 35c7a078dd6..4129745dc58 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
@@ -61,13 +61,9 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
Service::Service(Device* device) : mDevice(device) {
const VulkanDeviceInfo& deviceInfo = mDevice->GetDeviceInfo();
- const VulkanGlobalInfo& globalInfo =
- ToBackend(mDevice->GetAdapter())->GetBackend()->GetGlobalInfo();
- mSupported = globalInfo.getPhysicalDeviceProperties2 &&
- globalInfo.externalMemoryCapabilities && deviceInfo.externalMemory &&
- deviceInfo.externalMemoryFD && deviceInfo.externalMemoryDmaBuf &&
- deviceInfo.imageDrmFormatModifier;
+ mSupported = deviceInfo.HasExt(DeviceExt::ExternalMemoryFD) &&
+ deviceInfo.HasExt(DeviceExt::ImageDrmFormatModifier);
}
Service::~Service() = default;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
index 872432410a8..99057771f63 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
@@ -23,13 +23,7 @@
namespace dawn_native { namespace vulkan { namespace external_memory {
Service::Service(Device* device) : mDevice(device) {
- const VulkanDeviceInfo& deviceInfo = mDevice->GetDeviceInfo();
- const VulkanGlobalInfo& globalInfo =
- ToBackend(mDevice->GetAdapter())->GetBackend()->GetGlobalInfo();
-
- mSupported = globalInfo.getPhysicalDeviceProperties2 &&
- globalInfo.externalMemoryCapabilities && deviceInfo.externalMemory &&
- deviceInfo.externalMemoryFD;
+ mSupported = device->GetDeviceInfo().HasExt(DeviceExt::ExternalMemoryFD);
}
Service::~Service() = default;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
index 10b99555e5c..08d8d630045 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
@@ -23,13 +23,7 @@
namespace dawn_native { namespace vulkan { namespace external_memory {
Service::Service(Device* device) : mDevice(device) {
- const VulkanDeviceInfo& deviceInfo = mDevice->GetDeviceInfo();
- const VulkanGlobalInfo& globalInfo =
- ToBackend(mDevice->GetAdapter())->GetBackend()->GetGlobalInfo();
-
- mSupported = globalInfo.getPhysicalDeviceProperties2 &&
- globalInfo.externalMemoryCapabilities && deviceInfo.externalMemory &&
- deviceInfo.externalMemoryFD;
+ mSupported = device->GetDeviceInfo().HasExt(DeviceExt::ExternalMemoryZirconHandle);
}
Service::~Service() = default;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp
index e79288a996a..aecc8935006 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp
@@ -21,13 +21,7 @@
namespace dawn_native { namespace vulkan { namespace external_semaphore {
Service::Service(Device* device) : mDevice(device) {
- const VulkanDeviceInfo& deviceInfo = mDevice->GetDeviceInfo();
- const VulkanGlobalInfo& globalInfo =
- ToBackend(mDevice->GetAdapter())->GetBackend()->GetGlobalInfo();
-
- mSupported = globalInfo.getPhysicalDeviceProperties2 &&
- globalInfo.externalSemaphoreCapabilities && deviceInfo.externalSemaphore &&
- deviceInfo.externalSemaphoreFD;
+ mSupported = device->GetDeviceInfo().HasExt(DeviceExt::ExternalSemaphoreFD);
// Early out before we try using extension functions
if (!mSupported) {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
index fd10076e9eb..b4e3a62e5e3 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
@@ -21,13 +21,7 @@
namespace dawn_native { namespace vulkan { namespace external_semaphore {
Service::Service(Device* device) : mDevice(device) {
- const VulkanDeviceInfo& deviceInfo = mDevice->GetDeviceInfo();
- const VulkanGlobalInfo& globalInfo =
- ToBackend(mDevice->GetAdapter())->GetBackend()->GetGlobalInfo();
-
- mSupported = globalInfo.getPhysicalDeviceProperties2 &&
- globalInfo.externalSemaphoreCapabilities && deviceInfo.externalSemaphore &&
- deviceInfo.externalSemaphoreFD;
+ mSupported = device->GetDeviceInfo().HasExt(DeviceExt::ExternalSemaphoreZirconHandle);
// Early out before we try using extension functions
if (!mSupported) {
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
index 9819b225e43..358e7175252 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
@@ -36,13 +36,11 @@ namespace dawn_wire { namespace client {
cmd.handleCreateInfoLength = handleCreateInfoLength;
cmd.handleCreateInfo = nullptr;
- size_t commandSize = cmd.GetRequiredSize();
- size_t requiredSize = commandSize + handleCreateInfoLength;
- char* allocatedBuffer =
- static_cast<char*>(buffer->device->GetClient()->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ char* writeHandleSpace =
+ buffer->device->GetClient()->SerializeCommand(cmd, handleCreateInfoLength);
+
// Serialize the handle into the space after the command.
- handle->SerializeCreate(allocatedBuffer + commandSize);
+ handle->SerializeCreate(writeHandleSpace);
}
} // namespace
@@ -54,12 +52,25 @@ namespace dawn_wire { namespace client {
uint32_t serial = buffer->requestSerial++;
ASSERT(buffer->requests.find(serial) == buffer->requests.end());
+ if (buffer->size > std::numeric_limits<size_t>::max()) {
+ // On buffer creation, we check that mappable buffers do not exceed this size.
+ // So this buffer must not have mappable usage. Inject a validation error.
+ ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(buffer->device),
+ WGPUErrorType_Validation,
+ "Buffer needs the correct map usage bit");
+ callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
+ return;
+ }
+
// Create a ReadHandle for the map request. This is the client's intent to read GPU
// memory.
MemoryTransferService::ReadHandle* readHandle =
- buffer->device->GetClient()->GetMemoryTransferService()->CreateReadHandle(buffer->size);
+ buffer->device->GetClient()->GetMemoryTransferService()->CreateReadHandle(
+ static_cast<size_t>(buffer->size));
if (readHandle == nullptr) {
- callback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, userdata);
+ ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(buffer->device),
+ WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
+ callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
return;
}
@@ -84,13 +95,25 @@ namespace dawn_wire { namespace client {
uint32_t serial = buffer->requestSerial++;
ASSERT(buffer->requests.find(serial) == buffer->requests.end());
+ if (buffer->size > std::numeric_limits<size_t>::max()) {
+ // On buffer creation, we check that mappable buffers do not exceed this size.
+ // So this buffer must not have mappable usage. Inject a validation error.
+ ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(buffer->device),
+ WGPUErrorType_Validation,
+ "Buffer needs the correct map usage bit");
+ callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
+ return;
+ }
+
// Create a WriteHandle for the map request. This is the client's intent to write GPU
// memory.
MemoryTransferService::WriteHandle* writeHandle =
buffer->device->GetClient()->GetMemoryTransferService()->CreateWriteHandle(
- buffer->size);
+ static_cast<size_t>(buffer->size));
if (writeHandle == nullptr) {
- callback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, userdata);
+ ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(buffer->device),
+ WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
+ callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
return;
}
@@ -112,6 +135,13 @@ namespace dawn_wire { namespace client {
Device* device = reinterpret_cast<Device*>(cDevice);
Client* wireClient = device->GetClient();
+ if ((descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 &&
+ descriptor->size > std::numeric_limits<size_t>::max()) {
+ ClientDeviceInjectError(cDevice, WGPUErrorType_OutOfMemory,
+ "Buffer is too large for map usage");
+ return ClientDeviceCreateErrorBuffer(cDevice);
+ }
+
auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(device);
Buffer* buffer = bufferObjectAndSerial->object.get();
// Store the size of the buffer so that mapping operations can allocate a
@@ -123,9 +153,7 @@ namespace dawn_wire { namespace client {
cmd.descriptor = descriptor;
cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(wireClient->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer, *wireClient);
+ wireClient->SerializeCommand(cmd);
return reinterpret_cast<WGPUBuffer>(buffer);
}
@@ -136,15 +164,18 @@ namespace dawn_wire { namespace client {
Device* device = reinterpret_cast<Device*>(cDevice);
Client* wireClient = device->GetClient();
- auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(device);
- Buffer* buffer = bufferObjectAndSerial->object.get();
- buffer->size = descriptor->size;
-
WGPUCreateBufferMappedResult result;
- result.buffer = reinterpret_cast<WGPUBuffer>(buffer);
result.data = nullptr;
result.dataLength = 0;
+ // This buffer is too large to be mapped, and too large to create a WriteHandle for.
+ if (descriptor->size > std::numeric_limits<size_t>::max()) {
+ ClientDeviceInjectError(cDevice, WGPUErrorType_OutOfMemory,
+ "Buffer is too large for mapping");
+ result.buffer = ClientDeviceCreateErrorBuffer(cDevice);
+ return result;
+ }
+
// Create a WriteHandle for the map request. This is the client's intent to write GPU
// memory.
std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle =
@@ -152,7 +183,9 @@ namespace dawn_wire { namespace client {
wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
if (writeHandle == nullptr) {
- // TODO(enga): Support context lost generated by the client.
+ ClientDeviceInjectError(cDevice, WGPUErrorType_OutOfMemory,
+ "Buffer mapping allocation failed");
+ result.buffer = ClientDeviceCreateErrorBuffer(cDevice);
return result;
}
@@ -161,15 +194,21 @@ namespace dawn_wire { namespace client {
// Open the WriteHandle. This returns a pointer and size of mapped memory.
// |result.data| may be null on error.
std::tie(result.data, result.dataLength) = writeHandle->Open();
-
if (result.data == nullptr) {
- // TODO(enga): Support context lost generated by the client.
+ ClientDeviceInjectError(cDevice, WGPUErrorType_OutOfMemory,
+ "Buffer mapping allocation failed");
+ result.buffer = ClientDeviceCreateErrorBuffer(cDevice);
return result;
}
+ auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(device);
+ Buffer* buffer = bufferObjectAndSerial->object.get();
+ buffer->size = descriptor->size;
// Successfully created staging memory. The buffer now owns the WriteHandle.
buffer->writeHandle = std::move(writeHandle);
+ result.buffer = reinterpret_cast<WGPUBuffer>(buffer);
+
// Get the serialization size of the WriteHandle.
size_t handleCreateInfoLength = buffer->writeHandle->SerializeCreateSize();
@@ -180,12 +219,11 @@ namespace dawn_wire { namespace client {
cmd.handleCreateInfoLength = handleCreateInfoLength;
cmd.handleCreateInfo = nullptr;
- size_t commandSize = cmd.GetRequiredSize();
- size_t requiredSize = commandSize + handleCreateInfoLength;
- char* allocatedBuffer = static_cast<char*>(wireClient->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer, *wireClient);
+ char* writeHandleSpace =
+ buffer->device->GetClient()->SerializeCommand(cmd, handleCreateInfoLength);
+
// Serialize the WriteHandle into the space after the command.
- buffer->writeHandle->SerializeCreate(allocatedBuffer + commandSize);
+ buffer->writeHandle->SerializeCreate(writeHandleSpace);
return result;
}
@@ -243,10 +281,7 @@ namespace dawn_wire { namespace client {
cmd.count = count;
cmd.data = static_cast<const uint8_t*>(data);
- Client* wireClient = buffer->device->GetClient();
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(wireClient->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ buffer->device->GetClient()->SerializeCommand(cmd);
}
void ClientHandwrittenBufferUnmap(WGPUBuffer cBuffer) {
@@ -273,14 +308,12 @@ namespace dawn_wire { namespace client {
cmd.writeFlushInfoLength = writeFlushInfoLength;
cmd.writeFlushInfo = nullptr;
- size_t commandSize = cmd.GetRequiredSize();
- size_t requiredSize = commandSize + writeFlushInfoLength;
- char* allocatedBuffer =
- static_cast<char*>(buffer->device->GetClient()->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ char* writeHandleSpace =
+ buffer->device->GetClient()->SerializeCommand(cmd, writeFlushInfoLength);
+
// Serialize flush metadata into the space after the command.
// This closes the handle for writing.
- buffer->writeHandle->SerializeFlush(allocatedBuffer + commandSize);
+ buffer->writeHandle->SerializeFlush(writeHandleSpace);
buffer->writeHandle = nullptr;
} else if (buffer->readHandle) {
@@ -290,10 +323,7 @@ namespace dawn_wire { namespace client {
BufferUnmapCmd cmd;
cmd.self = cBuffer;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer =
- static_cast<char*>(buffer->device->GetClient()->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer, *buffer->device->GetClient());
+ buffer->device->GetClient()->SerializeCommand(cmd);
}
void ClientHandwrittenBufferDestroy(WGPUBuffer cBuffer) {
@@ -306,10 +336,7 @@ namespace dawn_wire { namespace client {
BufferDestroyCmd cmd;
cmd.self = cBuffer;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer =
- static_cast<char*>(buffer->device->GetClient()->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer, *buffer->device->GetClient());
+ buffer->device->GetClient()->SerializeCommand(cmd);
}
WGPUFence ClientHandwrittenQueueCreateFence(WGPUQueue cSelf,
@@ -323,9 +350,7 @@ namespace dawn_wire { namespace client {
cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
cmd.descriptor = descriptor;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(device->GetClient()->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer, *device->GetClient());
+ device->GetClient()->SerializeCommand(cmd);
WGPUFence cFence = reinterpret_cast<WGPUFence>(allocation->object.get());
@@ -360,10 +385,25 @@ namespace dawn_wire { namespace client {
cmd.fence = cFence;
cmd.signalValue = signalValue;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer =
- static_cast<char*>(fence->device->GetClient()->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer, *fence->device->GetClient());
+ queue->device->GetClient()->SerializeCommand(cmd);
+ }
+
+ void ClientHandwrittenQueueWriteBuffer(WGPUQueue cQueue,
+ WGPUBuffer cBuffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ Queue* queue = reinterpret_cast<Queue*>(cQueue);
+ Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
+
+ QueueWriteBufferInternalCmd cmd;
+ cmd.queueId = queue->id;
+ cmd.bufferId = buffer->id;
+ cmd.bufferOffset = bufferOffset;
+ cmd.data = static_cast<const uint8_t*>(data);
+ cmd.size = size;
+
+ queue->device->GetClient()->SerializeCommand(cmd);
}
void ClientDeviceReference(WGPUDevice) {
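Why the size_t guards introduced above matter (a sketch, not Dawn code): WGPUBufferDescriptor::size is a uint64_t, while the ReadHandle/WriteHandle allocation paths take size_t, so on a 32-bit client an oversized buffer would otherwise truncate silently instead of erroring.

    #include <cstdint>
    #include <limits>

    bool FitsInSizeT(uint64_t requested) {
        // On a 32-bit client size_t is 32 bits wide, so e.g. a 5 GiB request
        // fails here; on 64-bit targets the check can never fire.
        return requested <= std::numeric_limits<size_t>::max();
    }
    // Only after this check is static_cast<size_t>(requested) lossless.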
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
index 9790d48af67..1953347f44c 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
@@ -54,14 +54,14 @@ namespace dawn_wire { namespace client {
return result;
}
- void* Client::GetCmdSpace(size_t size) {
+ char* Client::GetCmdSpace(size_t size) {
if (DAWN_UNLIKELY(mIsDisconnected)) {
if (size > mDummyCmdSpace.size()) {
mDummyCmdSpace.resize(size);
}
return mDummyCmdSpace.data();
}
- return mSerializer->GetCmdSpace(size);
+ return static_cast<char*>(mSerializer->GetCmdSpace(size));
}
void Client::Disconnect() {
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.h b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
index 7769e338ecb..d8df86d8c98 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
@@ -42,13 +42,22 @@ namespace dawn_wire { namespace client {
const volatile char* HandleCommands(const volatile char* commands, size_t size);
ReservedTexture ReserveTexture(WGPUDevice device);
- void* GetCmdSpace(size_t size);
+ template <typename Cmd>
+ char* SerializeCommand(const Cmd& cmd, size_t extraSize = 0) {
+ size_t requiredSize = cmd.GetRequiredSize();
+ // TODO(cwallez@chromium.org): Check for overflows and allocation success?
+ char* allocatedBuffer = GetCmdSpace(requiredSize + extraSize);
+ cmd.Serialize(allocatedBuffer, *this);
+ return allocatedBuffer + requiredSize;
+ }
void Disconnect();
private:
#include "dawn_wire/client/ClientPrototypes_autogen.inc"
+ char* GetCmdSpace(size_t size);
+
Device* mDevice = nullptr;
CommandSerializer* mSerializer = nullptr;
WireDeserializeAllocator mAllocator;
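A usage sketch of the SerializeCommand helper above, matching the call sites in ApiProcs.cpp; `cmd` is any autogenerated wire command and `handle` an illustrative MemoryTransferService handle. The returned pointer marks the start of the extra reserved space, so a trailing payload lands contiguously after the serialized command in the wire buffer:

    size_t handleInfoLength = handle->SerializeCreateSize();
    char* handleSpace = client->SerializeCommand(cmd, handleInfoLength);
    handle->SerializeCreate(handleSpace);  // written directly after the command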
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp
index bfd66344609..e7737107274 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp
@@ -43,7 +43,10 @@ namespace dawn_wire { namespace client {
return false;
}
- mStagingData = std::unique_ptr<uint8_t[]>(new uint8_t[mSize]);
+ mStagingData = std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[mSize]);
+ if (!mStagingData) {
+ return false;
+ }
memcpy(mStagingData.get(), deserializePointer, mSize);
ASSERT(data != nullptr);
@@ -74,7 +77,10 @@ namespace dawn_wire { namespace client {
}
std::pair<void*, size_t> Open() override {
- mStagingData = std::unique_ptr<uint8_t[]>(new uint8_t[mSize]);
+ mStagingData = std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[mSize]);
+ if (!mStagingData) {
+ return std::make_pair(nullptr, 0);
+ }
memset(mStagingData.get(), 0, mSize);
return std::make_pair(mStagingData.get(), mSize);
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
index c69983ae00c..43361b3e625 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
@@ -34,9 +34,7 @@ namespace dawn_wire { namespace client {
cmd.self = reinterpret_cast<WGPUDevice>(this);
cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(mClient->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer, *mClient);
+ mClient->SerializeCommand(cmd);
}
Device::~Device() {
@@ -51,9 +49,7 @@ namespace dawn_wire { namespace client {
cmd.objectType = ObjectType::Queue;
cmd.objectId = mDefaultQueue->id;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(mClient->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ mClient->SerializeCommand(cmd);
mClient->QueueAllocator().Free(mDefaultQueue);
}
@@ -92,10 +88,7 @@ namespace dawn_wire { namespace client {
cmd.self = reinterpret_cast<WGPUDevice>(this);
cmd.filter = filter;
- Client* wireClient = GetClient();
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(wireClient->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer, *wireClient);
+ mClient->SerializeCommand(cmd);
}
bool Device::RequestPopErrorScope(WGPUErrorCallback callback, void* userdata) {
@@ -113,10 +106,7 @@ namespace dawn_wire { namespace client {
cmd.device = reinterpret_cast<WGPUDevice>(this);
cmd.requestSerial = serial;
- Client* wireClient = GetClient();
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(wireClient->GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer, *wireClient);
+ mClient->SerializeCommand(cmd);
return true;
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ObjectAllocator.h b/chromium/third_party/dawn/src/dawn_wire/client/ObjectAllocator.h
index 19a312209c4..215b9f4a32c 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ObjectAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ObjectAllocator.h
@@ -18,6 +18,7 @@
#include "common/Assert.h"
#include "common/Compiler.h"
+#include <limits>
#include <memory>
#include <vector>
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
index 31b52dccfaa..0199aba42ff 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
@@ -39,8 +39,8 @@ namespace dawn_wire { namespace server {
DestroyAllObjects(mProcs);
}
- void* Server::GetCmdSpace(size_t size) {
- return mSerializer->GetCmdSpace(size);
+ char* Server::GetCmdSpace(size_t size) {
+ return static_cast<char*>(mSerializer->GetCmdSpace(size));
}
bool Server::InjectTexture(WGPUTexture texture, uint32_t id, uint32_t generation) {
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.h b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
index effe69e25b1..7dd0303df7c 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
@@ -58,7 +58,15 @@ namespace dawn_wire { namespace server {
bool InjectTexture(WGPUTexture texture, uint32_t id, uint32_t generation);
private:
- void* GetCmdSpace(size_t size);
+ template <typename Cmd>
+ char* SerializeCommand(const Cmd& cmd, size_t extraSize = 0) {
+ size_t requiredSize = cmd.GetRequiredSize();
+ // TODO(cwallez@chromium.org): Check for overflows and allocation success?
+ char* allocatedBuffer = GetCmdSpace(requiredSize + extraSize);
+ cmd.Serialize(allocatedBuffer);
+ return allocatedBuffer + requiredSize;
+ }
+ char* GetCmdSpace(size_t size);
// Forwarding callbacks
static void ForwardUncapturedError(WGPUErrorType type, const char* message, void* userdata);
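
The TODO in SerializeCommand notes that requiredSize + extraSize is unchecked. A sketch of the overflow guard the TODO is asking for; CheckedAdd is an illustrative helper, not something this patch adds:

    #include <cstddef>
    #include <limits>

    // Illustrative only: reports failure instead of wrapping on overflow.
    bool CheckedAdd(size_t a, size_t b, size_t* out) {
        if (a > std::numeric_limits<size_t>::max() - b) {
            return false;
        }
        *out = a + b;
        return true;
    }

    // Inside SerializeCommand the TODO could then become:
    //   size_t totalSize;
    //   if (!CheckedAdd(requiredSize, extraSize, &totalSize)) { /* error */ }
    //   char* allocatedBuffer = GetCmdSpace(totalSize);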
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
index 89ebacbbc65..1b516f96be5 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
@@ -202,6 +202,7 @@ namespace dawn_wire { namespace server {
// to Unmap and attempt to update mapped data of an error buffer.
return false;
}
+
// Deserialize the flush info and flush updated data from the handle into the target
// of the handle. The target is set via WriteHandle::SetTarget.
return buffer->writeHandle->DeserializeFlush(writeFlushInfo,
@@ -251,14 +252,11 @@ namespace dawn_wire { namespace server {
cmd.initialDataInfoLength = initialDataInfoLength;
cmd.initialDataInfo = nullptr;
- size_t commandSize = cmd.GetRequiredSize();
- size_t requiredSize = commandSize + initialDataInfoLength;
- char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ char* readHandleSpace = SerializeCommand(cmd, initialDataInfoLength);
if (status == WGPUBufferMapAsyncStatus_Success) {
// Serialize the initialization message into the space after the command.
- data->readHandle->SerializeInitialData(ptr, dataLength, allocatedBuffer + commandSize);
+ data->readHandle->SerializeInitialData(ptr, dataLength, readHandleSpace);
// The in-flight map request returned successfully.
// Move the ReadHandle so it is owned by the buffer.
@@ -283,9 +281,7 @@ namespace dawn_wire { namespace server {
cmd.requestSerial = data->requestSerial;
cmd.status = status;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ SerializeCommand(cmd);
if (status == WGPUBufferMapAsyncStatus_Success) {
// The in-flight map request returned successfully.
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
index 66c0d70153d..64ad1bb0c50 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
@@ -31,18 +31,14 @@ namespace dawn_wire { namespace server {
cmd.type = type;
cmd.message = message;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ SerializeCommand(cmd);
}
void Server::OnDeviceLost(const char* message) {
ReturnDeviceLostCallbackCmd cmd;
cmd.message = message;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ SerializeCommand(cmd);
}
bool Server::DoDevicePopErrorScope(WGPUDevice cDevice, uint64_t requestSerial) {
@@ -73,9 +69,7 @@ namespace dawn_wire { namespace server {
cmd.type = type;
cmd.message = message;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ SerializeCommand(cmd);
}
}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp
index de056a1d5e9..1a4105c38bd 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp
@@ -35,9 +35,7 @@ namespace dawn_wire { namespace server {
cmd.fence = data->fence;
cmd.value = data->value;
- size_t requiredSize = cmd.GetRequiredSize();
- char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
- cmd.Serialize(allocatedBuffer);
+ SerializeCommand(cmd);
}
}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
index 4ec808ed1ff..6e47492d3b0 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
@@ -38,4 +38,21 @@ namespace dawn_wire { namespace server {
return true;
}
+ bool Server::DoQueueWriteBufferInternal(ObjectId queueId,
+ ObjectId bufferId,
+ uint64_t bufferOffset,
+ const uint8_t* data,
+ size_t size) {
+        // The null object is never valid as `queue` or `buffer`, so the null checks below
+        // double as checks that each ID is valid.
+ auto* queue = QueueObjects().Get(queueId);
+ auto* buffer = BufferObjects().Get(bufferId);
+ if (queue == nullptr || buffer == nullptr) {
+ return false;
+ }
+
+ mProcs.queueWriteBuffer(queue->handle, buffer->handle, bufferOffset, data, size);
+ return true;
+ }
+
}} // namespace dawn_wire::server
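
DoQueueWriteBufferInternal is the server half of the queue-write data path: it validates both IDs and forwards to the real queueWriteBuffer proc. A hypothetical sketch of the client half, which would serialize the command and its data through the wire; the command and field names here are illustrative, not taken from this patch:

    // Hypothetical client-side counterpart; the wire carries `size` bytes of
    // user data to the server, where DoQueueWriteBufferInternal validates the
    // IDs before touching the buffer.
    void WriteBufferOverWire(Client* client,
                             ObjectId queueId,
                             ObjectId bufferId,
                             uint64_t bufferOffset,
                             const uint8_t* data,
                             size_t size) {
        QueueWriteBufferInternalCmd cmd;  // illustrative type name
        cmd.queueId = queueId;
        cmd.bufferId = bufferId;
        cmd.bufferOffset = bufferOffset;
        cmd.data = data;
        cmd.size = size;
        client->SerializeCommand(cmd);
    }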
diff --git a/chromium/third_party/dawn/src/fuzzers/BUILD.gn b/chromium/third_party/dawn/src/fuzzers/BUILD.gn
index 71a6d88a96e..f6400b433c2 100644
--- a/chromium/third_party/dawn/src/fuzzers/BUILD.gn
+++ b/chromium/third_party/dawn/src/fuzzers/BUILD.gn
@@ -74,9 +74,7 @@ static_library("dawn_spirv_cross_fuzzer_common") {
"DawnSPIRVCrossFuzzer.cpp",
"DawnSPIRVCrossFuzzer.h",
]
- public_deps = [
- "${dawn_shaderc_dir}:libshaderc_spvc",
- ]
+ public_deps = [ "${dawn_shaderc_dir}:libshaderc_spvc" ]
}
static_library("dawn_wire_server_fuzzer_common") {
@@ -98,12 +96,8 @@ static_library("dawn_wire_server_fuzzer_common") {
# needed.
# Uses Dawn specific options and varies input data
dawn_fuzzer_test("dawn_spirv_cross_glsl_fast_fuzzer") {
- sources = [
- "DawnSPIRVCrossGLSLFastFuzzer.cpp",
- ]
- deps = [
- ":dawn_spirv_cross_fuzzer_common",
- ]
+ sources = [ "DawnSPIRVCrossGLSLFastFuzzer.cpp" ]
+ deps = [ ":dawn_spirv_cross_fuzzer_common" ]
asan_options = [ "allow_user_segv_handler=1" ]
}
@@ -111,12 +105,8 @@ dawn_fuzzer_test("dawn_spirv_cross_glsl_fast_fuzzer") {
# needed.
# Uses Dawn specific options and varies input data
dawn_fuzzer_test("dawn_spirv_cross_hlsl_fast_fuzzer") {
- sources = [
- "DawnSPIRVCrossHLSLFastFuzzer.cpp",
- ]
- deps = [
- ":dawn_spirv_cross_fuzzer_common",
- ]
+ sources = [ "DawnSPIRVCrossHLSLFastFuzzer.cpp" ]
+ deps = [ ":dawn_spirv_cross_fuzzer_common" ]
asan_options = [ "allow_user_segv_handler=1" ]
}
@@ -124,62 +114,48 @@ dawn_fuzzer_test("dawn_spirv_cross_hlsl_fast_fuzzer") {
# needed.
# Uses Dawn specific options and varies input data
dawn_fuzzer_test("dawn_spirv_cross_msl_fast_fuzzer") {
- sources = [
- "DawnSPIRVCrossMSLFastFuzzer.cpp",
- ]
- deps = [
- ":dawn_spirv_cross_fuzzer_common",
- ]
+ sources = [ "DawnSPIRVCrossMSLFastFuzzer.cpp" ]
+ deps = [ ":dawn_spirv_cross_fuzzer_common" ]
asan_options = [ "allow_user_segv_handler=1" ]
}
dawn_fuzzer_test("dawn_spvc_glsl_fast_fuzzer") {
- sources = [
- "DawnSPVCglslFastFuzzer.cpp",
- ]
- deps = [
- "${dawn_shaderc_dir}:libshaderc_spvc",
- ]
+ sources = [ "DawnSPVCglslFastFuzzer.cpp" ]
+ deps = [ "${dawn_shaderc_dir}:libshaderc_spvc" ]
}
dawn_fuzzer_test("dawn_spvc_hlsl_fast_fuzzer") {
- sources = [
- "DawnSPVChlslFastFuzzer.cpp",
- ]
- deps = [
- "${dawn_shaderc_dir}:libshaderc_spvc",
- ]
+ sources = [ "DawnSPVChlslFastFuzzer.cpp" ]
+ deps = [ "${dawn_shaderc_dir}:libshaderc_spvc" ]
}
dawn_fuzzer_test("dawn_spvc_msl_fast_fuzzer") {
- sources = [
- "DawnSPVCmslFastFuzzer.cpp",
- ]
- deps = [
- "${dawn_shaderc_dir}:libshaderc_spvc",
- ]
+ sources = [ "DawnSPVCmslFastFuzzer.cpp" ]
+ deps = [ "${dawn_shaderc_dir}:libshaderc_spvc" ]
}
dawn_fuzzer_test("dawn_wire_server_and_frontend_fuzzer") {
- sources = [
- "DawnWireServerAndFrontendFuzzer.cpp",
- ]
+ sources = [ "DawnWireServerAndFrontendFuzzer.cpp" ]
- deps = [
- ":dawn_wire_server_fuzzer_common",
- ]
+ deps = [ ":dawn_wire_server_fuzzer_common" ]
additional_configs = [ "${dawn_root}/src/common:dawn_internal" ]
}
+if (is_win) {
+ dawn_fuzzer_test("dawn_wire_server_and_d3d12_backend_fuzzer") {
+ sources = [ "DawnWireServerAndD3D12BackendFuzzer.cpp" ]
+
+ deps = [ ":dawn_wire_server_fuzzer_common" ]
+
+ additional_configs = [ "${dawn_root}/src/common:dawn_internal" ]
+ }
+}
+
dawn_fuzzer_test("dawn_wire_server_and_vulkan_backend_fuzzer") {
- sources = [
- "DawnWireServerAndVulkanBackendFuzzer.cpp",
- ]
+ sources = [ "DawnWireServerAndVulkanBackendFuzzer.cpp" ]
- deps = [
- ":dawn_wire_server_fuzzer_common",
- ]
+ deps = [ ":dawn_wire_server_fuzzer_common" ]
additional_configs = [ "${dawn_root}/src/common:dawn_internal" ]
}
@@ -195,5 +171,10 @@ group("dawn_fuzzers") {
":dawn_spvc_hlsl_fast_fuzzer",
":dawn_spvc_msl_fast_fuzzer",
":dawn_wire_server_and_frontend_fuzzer",
+ ":dawn_wire_server_and_vulkan_backend_fuzzer",
]
+
+ if (is_win) {
+ deps += [ ":dawn_wire_server_and_d3d12_backend_fuzzer" ]
+ }
}
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
index e07e74185cd..f1a6047b5b3 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
@@ -59,18 +59,18 @@ typedef struct {
} DawnSwapChainImplementation;
#if defined(DAWN_ENABLE_BACKEND_D3D12) && defined(__cplusplus)
-typedef struct {
+struct DawnWSIContextD3D12 {
WGPUDevice device = nullptr;
-} DawnWSIContextD3D12;
+};
#endif
#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(__OBJC__)
# import <Metal/Metal.h>
-typedef struct {
+struct DawnWSIContextMetal {
id<MTLDevice> device = nil;
id<MTLCommandQueue> queue = nil;
-} DawnWSIContextMetal;
+};
#endif
#ifdef DAWN_ENABLE_BACKEND_OPENGL
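
Replacing the anonymous typedef'd structs with named structs matters for C++ consumers: only a type declared as `struct Name { ... };` can be forward-declared. A standalone sketch of the difference:

    // With `struct DawnWSIContextD3D12 { ... };` this forward declaration works,
    // so headers can pass pointers around without pulling in the full header:
    struct DawnWSIContextD3D12;
    void InitWSIContext(DawnWSIContextD3D12* context);

    // With the old `typedef struct { ... } DawnWSIContextD3D12;`, the struct
    // itself is unnamed; `struct DawnWSIContextD3D12;` would declare a new,
    // conflicting type instead of referring to the typedef.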
diff --git a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
index b144e4ece76..0965871941c 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
@@ -34,7 +34,10 @@ namespace dawn_native { namespace vulkan {
// Can't use DAWN_PLATFORM_LINUX since header included in both dawn and chrome
#ifdef __linux__
- // Common properties of external images represented by FDs
+    // Common properties of external images represented by FDs. On successful import, the file
+    // descriptor's ownership is transferred to the Dawn implementation and it shouldn't be
+ // used outside of Dawn again. TODO(enga): Also transfer ownership in the error case so the
+ // caller can assume the FD is always consumed.
struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptor {
public:
int memoryFD; // A file descriptor from an export of the memory of the image
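
Since a successful import consumes the file descriptor, per the comment above, a caller that still needs the memory FD afterwards should hand Dawn a duplicate. A standalone POSIX sketch, independent of the Dawn API:

    #include <unistd.h>

    // Give Dawn its own copy of `memoryFD`: on success Dawn owns and closes
    // the duplicate, and the caller keeps using the original descriptor.
    int DuplicateForImport(int memoryFD) {
        int importFD = dup(memoryFD);
        // dup() returns -1 on failure (e.g. EMFILE); treat that as an
        // import error before filling in ExternalImageDescriptorFD.
        return importFD;
    }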
diff --git a/chromium/third_party/dawn/src/tests/BUILD.gn b/chromium/third_party/dawn/src/tests/BUILD.gn
index 4e2ebeda7e5..a1e77415ccc 100644
--- a/chromium/third_party/dawn/src/tests/BUILD.gn
+++ b/chromium/third_party/dawn/src/tests/BUILD.gn
@@ -159,6 +159,9 @@ test("dawn_unittests") {
"unittests/ErrorTests.cpp",
"unittests/ExtensionTests.cpp",
"unittests/GetProcAddressTests.cpp",
+ "unittests/ITypArrayTests.cpp",
+ "unittests/ITypBitsetTests.cpp",
+ "unittests/ITypSpanTests.cpp",
"unittests/LinkedListTests.cpp",
"unittests/MathTests.cpp",
"unittests/ObjectBaseTests.cpp",
@@ -172,6 +175,7 @@ test("dawn_unittests") {
"unittests/SlabAllocatorTests.cpp",
"unittests/SystemUtilsTests.cpp",
"unittests/ToBackendTests.cpp",
+ "unittests/TypedIntegerTests.cpp",
"unittests/validation/BindGroupValidationTests.cpp",
"unittests/validation/BufferValidationTests.cpp",
"unittests/validation/CommandBufferValidationTests.cpp",
@@ -185,6 +189,8 @@ test("dawn_unittests") {
"unittests/validation/FenceValidationTests.cpp",
"unittests/validation/GetBindGroupLayoutValidationTests.cpp",
"unittests/validation/IndexBufferValidationTests.cpp",
+ "unittests/validation/MinimumBufferSizeValidationTests.cpp",
+ "unittests/validation/QuerySetValidationTests.cpp",
"unittests/validation/QueueSubmitValidationTests.cpp",
"unittests/validation/RenderBundleValidationTests.cpp",
"unittests/validation/RenderPassDescriptorValidationTests.cpp",
@@ -277,9 +283,12 @@ source_set("dawn_end2end_tests_sources") {
"end2end/GpuMemorySynchronizationTests.cpp",
"end2end/IndexFormatTests.cpp",
"end2end/MultisampledRenderingTests.cpp",
+ "end2end/MultisampledSamplingTests.cpp",
+ "end2end/NonzeroBufferCreationTests.cpp",
"end2end/NonzeroTextureCreationTests.cpp",
"end2end/ObjectCachingTests.cpp",
"end2end/OpArrayLengthTests.cpp",
+ "end2end/PipelineLayoutTests.cpp",
"end2end/PrimitiveTopologyTests.cpp",
"end2end/QueueTests.cpp",
"end2end/RenderBundleTests.cpp",
@@ -287,8 +296,11 @@ source_set("dawn_end2end_tests_sources") {
"end2end/RenderPassTests.cpp",
"end2end/SamplerTests.cpp",
"end2end/ScissorTests.cpp",
+ "end2end/ShaderFloat16Tests.cpp",
"end2end/StorageTextureTests.cpp",
+ "end2end/SubresourceOutputAttachmentTests.cpp",
"end2end/TextureFormatTests.cpp",
+ "end2end/TextureSubresourceTests.cpp",
"end2end/TextureViewTests.cpp",
"end2end/TextureZeroInitTests.cpp",
"end2end/VertexFormatTests.cpp",
diff --git a/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp b/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp
index 634417cd12c..795d1f948ac 100644
--- a/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp
+++ b/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp
@@ -86,4 +86,172 @@ namespace utils {
return false;
}
}
+
+ uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ return 1u;
+
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ return 2u;
+
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Float:
+ return 4u;
+
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ return 8u;
+
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ return 16u;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ return 8u;
+
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBSfloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return 16u;
+
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Undefined:
+ default:
+ UNREACHABLE();
+ return 0u;
+ }
+ }
+
+ const char* GetGLSLImageFormatQualifier(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ return "r8";
+ case wgpu::TextureFormat::R8Snorm:
+ return "r8_snorm";
+ case wgpu::TextureFormat::R8Uint:
+ return "r8ui";
+ case wgpu::TextureFormat::R8Sint:
+ return "r8i";
+ case wgpu::TextureFormat::R16Uint:
+ return "r16ui";
+ case wgpu::TextureFormat::R16Sint:
+ return "r16i";
+ case wgpu::TextureFormat::R16Float:
+ return "r16f";
+ case wgpu::TextureFormat::RG8Unorm:
+ return "rg8";
+ case wgpu::TextureFormat::RG8Snorm:
+ return "rg8_snorm";
+ case wgpu::TextureFormat::RG8Uint:
+ return "rg8ui";
+ case wgpu::TextureFormat::RG8Sint:
+ return "rg8i";
+ case wgpu::TextureFormat::R32Float:
+ return "r32f";
+ case wgpu::TextureFormat::R32Uint:
+ return "r32ui";
+ case wgpu::TextureFormat::R32Sint:
+ return "r32i";
+ case wgpu::TextureFormat::RG16Uint:
+ return "rg16ui";
+ case wgpu::TextureFormat::RG16Sint:
+ return "rg16i";
+ case wgpu::TextureFormat::RG16Float:
+ return "rg16f";
+ case wgpu::TextureFormat::RGBA8Unorm:
+ return "rgba8";
+ case wgpu::TextureFormat::RGBA8Snorm:
+ return "rgba8_snorm";
+ case wgpu::TextureFormat::RGBA8Uint:
+ return "rgba8ui";
+ case wgpu::TextureFormat::RGBA8Sint:
+ return "rgba8i";
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return "rgb10_a2";
+ case wgpu::TextureFormat::RG11B10Float:
+ return "r11f_g11f_b10f";
+ case wgpu::TextureFormat::RG32Float:
+ return "rg32f";
+ case wgpu::TextureFormat::RG32Uint:
+ return "rg32ui";
+ case wgpu::TextureFormat::RG32Sint:
+ return "rg32i";
+ case wgpu::TextureFormat::RGBA16Uint:
+ return "rgba16ui";
+ case wgpu::TextureFormat::RGBA16Sint:
+ return "rgba16i";
+ case wgpu::TextureFormat::RGBA16Float:
+ return "rgba16f";
+ case wgpu::TextureFormat::RGBA32Float:
+ return "rgba32f";
+ case wgpu::TextureFormat::RGBA32Uint:
+ return "rgba32ui";
+ case wgpu::TextureFormat::RGBA32Sint:
+ return "rgba32i";
+
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBSfloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Undefined:
+ UNREACHABLE();
+ return "";
+ }
+ }
} // namespace utils
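
A small sketch of how the two new helpers combine when generating GLSL for a storage-texture test; the set/binding layout in the declaration string is illustrative:

    #include <sstream>
    #include <string>

    // Builds e.g. "layout(set = 0, binding = 0, rgba8) uniform readonly image2D img;"
    std::string MakeImageDeclaration(wgpu::TextureFormat format) {
        std::ostringstream decl;
        decl << "layout(set = 0, binding = 0, " << utils::GetGLSLImageFormatQualifier(format)
             << ") uniform readonly image2D img;";
        return decl.str();
    }

    // GetTexelBlockSizeInBytes() then sizes a tightly packed readback buffer:
    //   uint64_t byteSize = uint64_t(utils::GetTexelBlockSizeInBytes(format)) * width * height;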
diff --git a/chromium/third_party/dawn/src/utils/TextureFormatUtils.h b/chromium/third_party/dawn/src/utils/TextureFormatUtils.h
index bbd5f0c1846..2976e94d3fa 100644
--- a/chromium/third_party/dawn/src/utils/TextureFormatUtils.h
+++ b/chromium/third_party/dawn/src/utils/TextureFormatUtils.h
@@ -53,6 +53,9 @@ namespace utils {
const char* GetColorTextureComponentTypePrefix(wgpu::TextureFormat textureFormat);
bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
+
+ uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
+ const char* GetGLSLImageFormatQualifier(wgpu::TextureFormat textureFormat);
} // namespace utils
#endif
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
index 0d9b1f93fa9..686b223f35a 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
@@ -17,11 +17,14 @@
#include "common/Assert.h"
#include "common/Constants.h"
#include "common/Log.h"
+#include "common/Math.h"
+#include "utils/TextureFormatUtils.h"
#include <shaderc/shaderc.hpp>
#include <cstring>
#include <iomanip>
+#include <mutex>
#include <sstream>
namespace utils {
@@ -160,9 +163,9 @@ namespace utils {
wgpu::BufferDescriptor descriptor;
descriptor.size = size;
descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
-
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
- buffer.SetSubData(0, size, data);
+
+ device.GetDefaultQueue().WriteBuffer(buffer, 0, data, size);
return buffer;
}
@@ -251,7 +254,6 @@ namespace utils {
descriptor.size.width = width;
descriptor.size.height = height;
descriptor.size.depth = 1;
- descriptor.arrayLayerCount = 1;
descriptor.sampleCount = 1;
descriptor.format = BasicRenderPass::kDefaultColorFormat;
descriptor.mipLevelCount = 1;
@@ -276,12 +278,10 @@ namespace utils {
wgpu::TextureCopyView CreateTextureCopyView(wgpu::Texture texture,
uint32_t mipLevel,
- uint32_t arrayLayer,
wgpu::Origin3D origin) {
wgpu::TextureCopyView textureCopyView;
textureCopyView.texture = texture;
textureCopyView.mipLevel = mipLevel;
- textureCopyView.arrayLayer = arrayLayer;
textureCopyView.origin = origin;
return textureCopyView;
@@ -374,4 +374,47 @@ namespace utils {
return device.CreateBindGroup(&descriptor);
}
+ uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
+ const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+ return Align(bytesPerTexel * width, kTextureBytesPerRowAlignment);
+ }
+
+ uint32_t GetBytesInBufferTextureCopy(wgpu::TextureFormat format,
+ uint32_t width,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ uint32_t copyArrayLayerCount) {
+ ASSERT(rowsPerImage > 0);
+ const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+ const uint32_t bytesAtLastImage = bytesPerRow * (rowsPerImage - 1) + bytesPerTexel * width;
+ return bytesPerRow * rowsPerImage * (copyArrayLayerCount - 1) + bytesAtLastImage;
+ }
+
+ // TODO(jiawei.shao@intel.com): support compressed texture formats
+ BufferTextureCopyLayout GetBufferTextureCopyLayoutForTexture2DAtLevel(
+ wgpu::TextureFormat format,
+ wgpu::Extent3D textureSizeAtLevel0,
+ uint32_t mipmapLevel,
+ uint32_t rowsPerImage) {
+ BufferTextureCopyLayout layout;
+
+ layout.mipSize = {textureSizeAtLevel0.width >> mipmapLevel,
+ textureSizeAtLevel0.height >> mipmapLevel, textureSizeAtLevel0.depth};
+
+ layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
+
+ uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
+ layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
+
+ layout.byteLength =
+ GetBytesInBufferTextureCopy(format, layout.mipSize.width, layout.bytesPerRow,
+ appliedRowsPerImage, textureSizeAtLevel0.depth);
+
+ const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+ layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
+ layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
+ layout.texelBlockCount = layout.byteLength / bytesPerTexel;
+
+ return layout;
+ }
} // namespace utils
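
A worked example of the layout math above, assuming kTextureBytesPerRowAlignment is 256 (Dawn's buffer-to-texture row alignment) and a 128x128 RGBA8Unorm texture copied at mip level 2 with rowsPerImage = 0:

    // layout = GetBufferTextureCopyLayoutForTexture2DAtLevel(
    //     wgpu::TextureFormat::RGBA8Unorm, {128, 128, 1},
    //     /*mipmapLevel=*/2, /*rowsPerImage=*/0);
    //
    // mipSize            = {32, 32, 1}            (128 >> 2 in each dimension)
    // bytesPerRow        = Align(4 * 32, 256)  = 256
    // bytesPerImage      = 256 * 32            = 8192
    // byteLength         = 256 * 31 + 4 * 32   = 8064   (the last row is unpadded)
    // texelBlocksPerRow  = 256 / 4 = 64; texelBlocksPerImage = 2048; texelBlockCount = 2016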
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
index 92bd518cc80..fdbd8648f77 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.h
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
@@ -53,7 +53,6 @@ namespace utils {
uint32_t rowsPerImage);
wgpu::TextureCopyView CreateTextureCopyView(wgpu::Texture texture,
uint32_t level,
- uint32_t slice,
wgpu::Origin3D origin);
struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
@@ -130,6 +129,28 @@ namespace utils {
const wgpu::BindGroupLayout& layout,
std::initializer_list<BindingInitializationHelper> entriesInitializer);
+ struct BufferTextureCopyLayout {
+ uint64_t byteLength;
+ uint64_t texelBlockCount;
+ uint32_t bytesPerRow;
+ uint32_t texelBlocksPerRow;
+ uint32_t bytesPerImage;
+ uint32_t texelBlocksPerImage;
+ wgpu::Extent3D mipSize;
+ };
+
+ uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width);
+ uint32_t GetBytesInBufferTextureCopy(wgpu::TextureFormat format,
+ uint32_t width,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ uint32_t copyArrayLayerCount);
+ BufferTextureCopyLayout GetBufferTextureCopyLayoutForTexture2DAtLevel(
+ wgpu::TextureFormat format,
+ wgpu::Extent3D textureSizeAtLevel0,
+ uint32_t mipmapLevel,
+ uint32_t rowsPerImage);
+
} // namespace utils
#endif // UTILS_DAWNHELPERS_H_
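
With the slice parameter removed from CreateTextureCopyView above, call sites select the array layer through the origin instead. A hedged sketch of an updated call, assuming the WebGPU convention of this period where origin.z addresses the array layer of a 2D array texture:

    // Copy targeting array layer 3 at mip level 1; the layer index now travels
    // in origin.z rather than in a dedicated arrayLayer/slice argument.
    wgpu::TextureCopyView view =
        utils::CreateTextureCopyView(texture, /*level=*/1, {0, 0, 3});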