author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-01-23 17:21:03 +0100
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-01-23 16:25:15 +0000
commit    c551f43206405019121bd2b2c93714319a0a3300 (patch)
tree      1f48c30631c421fd4bbb3c36da20183c8a2ed7d7 /chromium/third_party/dawn/src
parent    7961cea6d1041e3e454dae6a1da660b453efd238 (diff)
download  qtwebengine-chromium-c551f43206405019121bd2b2c93714319a0a3300.tar.gz
BASELINE: Update Chromium to 79.0.3945.139
Change-Id: I336b7182fab9bca80b709682489c07db112eaca5
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/dawn/src')
-rw-r--r--  chromium/third_party/dawn/src/common/BUILD.gn | 10
-rw-r--r--  chromium/third_party/dawn/src/common/Platform.h | 12
-rw-r--r--  chromium/third_party/dawn/src/common/Result.h | 19
-rw-r--r--  chromium/third_party/dawn/src/common/vulkan_platform.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn/BUILD.gn | 78
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroup.cpp | 15
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp | 33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h | 138
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BuddyAllocator.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BuddyAllocator.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp | 104
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h | 73
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Buffer.cpp | 13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp | 108
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp | 75
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Commands.cpp | 17
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Commands.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/DawnNative.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.cpp | 75
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.h | 29
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp | 83
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/DynamicUploader.h | 29
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp | 115
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ErrorScope.h | 67
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.cpp | 46
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.h | 42
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/MemoryAllocator.h | 33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Pipeline.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Pipeline.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Queue.cpp | 21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Queue.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RefCounted.h | 13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp | 26
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h | 13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp | 48
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ResourceHeap.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp | 28
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp (renamed from chromium/third_party/dawn/src/dawn_native/RingBuffer.cpp) | 102
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h (renamed from chromium/third_party/dawn/src/dawn_native/RingBuffer.h) | 41
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp | 42
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/SwapChain.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/SwapChain.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.cpp | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Toggles.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Toggles.h | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp | 30
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.cpp | 21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp | 583
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp | 73
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h | 38
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.cpp | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp (renamed from chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapD3D12.cpp) | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h | 28
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp | 87
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h | 44
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp | 177
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h | 49
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp | 32
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp | 14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp | 23
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp | 68
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h | 62
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp | 33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h (renamed from chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapD3D12.h) | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp | 125
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h | 26
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm | 53
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm | 14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm | 343
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm | 21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm | 15
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm | 15
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp | 287
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp | 23
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp | 159
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp | 28
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp | 35
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp | 40
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp | 204
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h | 12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandRecordingContext.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp | 23
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp | 263
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h | 27
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ExternalHandle.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.cpp | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.cpp | 119
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.h | 43
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp | 38
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp | 25
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp | 12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp | 18
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp | 51
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.cpp | 26
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.h | 36
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp | 21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp | 20
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp | 25
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp | 165
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h | 50
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp | 18
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp | 69
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp | 110
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp | 138
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/WireClient.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/WireServer.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Client.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/Server.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/ServerInlineMemoryTransferService.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/fuzzers/BUILD.gn | 5
-rw-r--r--  chromium/third_party/dawn/src/include/dawn/dawn_proc.h | 36
-rw-r--r--  chromium/third_party/dawn/src/include/dawn/dawn_wsi.h | 1
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/DawnNative.h | 4
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_wire/Wire.h | 2
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_wire/WireClient.h | 13
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_wire/WireServer.h | 5
-rw-r--r--  chromium/third_party/dawn/src/utils/BackendBinding.h | 2
-rw-r--r--  chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h | 12
-rw-r--r--  chromium/third_party/dawn/src/utils/DawnHelpers.cpp | 29
-rw-r--r--  chromium/third_party/dawn/src/utils/DawnHelpers.h | 7
-rw-r--r--  chromium/third_party/dawn/src/utils/Glfw3Fuchsia.cpp | 100
-rw-r--r--  chromium/third_party/dawn/src/utils/MetalBinding.mm | 2
188 files changed, 4415 insertions, 1995 deletions
diff --git a/chromium/third_party/dawn/src/common/BUILD.gn b/chromium/third_party/dawn/src/common/BUILD.gn
index 9093e8240ac..c362b4c54e8 100644
--- a/chromium/third_party/dawn/src/common/BUILD.gn
+++ b/chromium/third_party/dawn/src/common/BUILD.gn
@@ -31,13 +31,16 @@ if (build_with_chromium) {
config("dawn_public_include_dirs") {
include_dirs = [
- "${target_gen_dir}/../..",
+ "${target_gen_dir}/../../src/include",
"${dawn_root}/src/include",
]
}
config("dawn_internal") {
- include_dirs = [ "${dawn_root}/src" ]
+ include_dirs = [
+ "${target_gen_dir}/../../src",
+ "${dawn_root}/src",
+ ]
defines = []
if (dawn_always_assert || dcheck_always_on || is_debug) {
@@ -76,7 +79,7 @@ config("dawn_internal") {
# This GN file is discovered by all Chromium builds, but common doesn't support
# all of Chromium's OSes so we explicitly make the target visible only on
# systems we know Dawn is able to compile on.
-if (is_win || is_linux || is_mac) {
+if (is_win || is_linux || is_mac || is_fuchsia) {
static_library("common") {
sources = [
"Assert.cpp",
@@ -99,6 +102,7 @@ if (is_win || is_linux || is_mac) {
"SwapChainUtils.h",
"vulkan_platform.h",
"windows_with_undefs.h",
+ "xlib_with_undefs.h",
]
public_configs = [ ":dawn_internal" ]
diff --git a/chromium/third_party/dawn/src/common/Platform.h b/chromium/third_party/dawn/src/common/Platform.h
index 3951362f443..f6d7fc5d056 100644
--- a/chromium/third_party/dawn/src/common/Platform.h
+++ b/chromium/third_party/dawn/src/common/Platform.h
@@ -17,15 +17,27 @@
#if defined(_WIN32) || defined(_WIN64)
# define DAWN_PLATFORM_WINDOWS 1
+
#elif defined(__linux__)
# define DAWN_PLATFORM_LINUX 1
# define DAWN_PLATFORM_POSIX 1
+
#elif defined(__APPLE__)
# define DAWN_PLATFORM_APPLE 1
# define DAWN_PLATFORM_POSIX 1
+# include <TargetConditionals.h>
+# if TARGET_OS_IPHONE
+# define DAWN_PLATFORM_IOS
+# elif TARGET_OS_MAC
+# define DAWN_PLATFORM_MACOS
+# else
+# error "Unsupported Apple platform."
+# endif
+
#elif defined(__Fuchsia__)
# define DAWN_PLATFORM_FUCHSIA 1
# define DAWN_PLATFORM_POSIX 1
+
#else
# error "Unsupported platform."
#endif
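
A minimal sketch of how code can branch on the Apple sub-platform macros introduced above; PlatformName() is an illustrative helper, not part of this patch.

    #include "common/Platform.h"

    const char* PlatformName() {
    #if defined(DAWN_PLATFORM_WINDOWS)
        return "Windows";
    #elif defined(DAWN_PLATFORM_MACOS)
        return "macOS";
    #elif defined(DAWN_PLATFORM_IOS)
        return "iOS";
    #elif defined(DAWN_PLATFORM_LINUX)
        return "Linux";
    #elif defined(DAWN_PLATFORM_FUCHSIA)
        return "Fuchsia";
    #else
        return "unknown";
    #endif
    }
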
diff --git a/chromium/third_party/dawn/src/common/Result.h b/chromium/third_party/dawn/src/common/Result.h
index cd63a7a848c..3e33052fb35 100644
--- a/chromium/third_party/dawn/src/common/Result.h
+++ b/chromium/third_party/dawn/src/common/Result.h
@@ -20,6 +20,7 @@
#include <cstddef>
#include <cstdint>
+#include <type_traits>
#include <utility>
// Result<T, E> is the following sum type (Haskell notation):
@@ -117,8 +118,11 @@ class DAWN_NO_DISCARD Result<T*, E*> {
Result(T* success);
Result(E* error);
- Result(Result<T*, E*>&& other);
- Result<T*, E*>& operator=(Result<T*, E>&& other);
+ // Support returning a Result<T*, E*> from a Result<TChild*, E*>
+ template <typename TChild>
+ Result(Result<TChild*, E*>&& other);
+ template <typename TChild>
+ Result<T*, E*>& operator=(Result<TChild*, E>&& other);
~Result();
@@ -129,6 +133,9 @@ class DAWN_NO_DISCARD Result<T*, E*> {
E* AcquireError();
private:
+ template <typename T2, typename E2>
+ friend class Result;
+
intptr_t mPayload = detail::kEmptyPayload;
};
@@ -265,13 +272,17 @@ Result<T*, E*>::Result(E* error) : mPayload(detail::MakePayload(error, detail::E
}
template <typename T, typename E>
-Result<T*, E*>::Result(Result<T*, E*>&& other) : mPayload(other.mPayload) {
+template <typename TChild>
+Result<T*, E*>::Result(Result<TChild*, E*>&& other) : mPayload(other.mPayload) {
other.mPayload = detail::kEmptyPayload;
+ static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value, "");
}
template <typename T, typename E>
-Result<T*, E*>& Result<T*, E*>::operator=(Result<T*, E>&& other) {
+template <typename TChild>
+Result<T*, E*>& Result<T*, E*>::operator=(Result<TChild*, E>&& other) {
ASSERT(mPayload == detail::kEmptyPayload);
+ static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value, "");
mPayload = other.mPayload;
other.mPayload = detail::kEmptyPayload;
return *this;
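
The templated constructor and assignment above let a function declared to return Result<Base*, E*> be implemented by returning a Result<Derived*, E*>, with the static_assert rejecting unrelated pointer types. A minimal sketch under that assumption; Base, Child, and FakeError are illustrative types, and the IsSuccess()/AcquireSuccess() accessors are assumed from the rest of Dawn's Result interface rather than shown in this hunk.

    #include "common/Result.h"

    struct FakeError { const char* message; };
    struct Base { virtual ~Base() = default; };
    struct Child : Base {};

    Result<Child*, FakeError*> MakeChild() {
        return {new Child};
    }

    Result<Base*, FakeError*> MakeBase() {
        // Derived-to-base conversion now compiles thanks to the templated move constructor.
        return MakeChild();
    }

    void UseBase() {
        Result<Base*, FakeError*> result = MakeBase();
        if (result.IsSuccess()) {
            delete result.AcquireSuccess();  // The payload must be acquired before destruction.
        }
    }
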
diff --git a/chromium/third_party/dawn/src/common/vulkan_platform.h b/chromium/third_party/dawn/src/common/vulkan_platform.h
index 03176628d0d..0011a3103c1 100644
--- a/chromium/third_party/dawn/src/common/vulkan_platform.h
+++ b/chromium/third_party/dawn/src/common/vulkan_platform.h
@@ -161,4 +161,9 @@ class alignas(kNativeVkHandleAlignment) VkNonDispatchableHandle {
# include "common/xlib_with_undefs.h"
#endif
+// Include Fuchsia-specific definitions that are not upstreamed yet.
+#if defined(DAWN_PLATFORM_FUCHSIA)
+# include <vulkan/vulkan_fuchsia_extras.h>
+#endif
+
#endif // COMMON_VULKANPLATFORM_H_
diff --git a/chromium/third_party/dawn/src/dawn/BUILD.gn b/chromium/third_party/dawn/src/dawn/BUILD.gn
index 7e2efade3ec..b0c449e8459 100644
--- a/chromium/third_party/dawn/src/dawn/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/BUILD.gn
@@ -14,8 +14,16 @@
import("../../scripts/dawn_overrides_with_defaults.gni")
-import("${dawn_root}/scripts/dawn_component.gni")
import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_component.gni")
+
+# Temporary group to not break Chromium `gn check` on Dawn CQ.
+group("libdawn") {
+ deps = [
+ ":dawncpp",
+ ":libdawn_proc",
+ ]
+}
###############################################################################
# Dawn headers
@@ -23,51 +31,85 @@ import("${dawn_root}/generator/dawn_generator.gni")
dawn_json_generator("dawn_headers_gen") {
target = "dawn_headers"
-
- # Generate as if we were in the main BUILD.gn because that was historically
- # the case and we can't move generated files without clobbering the build.
- custom_gen_dir = "${target_gen_dir}/../.."
outputs = [
- "dawn/dawncpp.h",
- "dawn/dawn.h",
+ "src/include/dawn/dawn.h",
+ "src/include/dawn/dawn_proc_table.h",
]
}
source_set("dawn_headers") {
all_dependent_configs = [ "${dawn_root}/src/common:dawn_public_include_dirs" ]
- deps = [
+ public_deps = [
":dawn_headers_gen",
]
sources = get_target_outputs(":dawn_headers_gen")
sources += [
- "${dawn_root}/src/include/dawn/EnumClassBitmasks.h",
"${dawn_root}/src/include/dawn/dawn_export.h",
"${dawn_root}/src/include/dawn/dawn_wsi.h",
]
}
###############################################################################
-# libdawn
+# Dawn C++ headers
+###############################################################################
+
+dawn_json_generator("dawncpp_headers_gen") {
+ target = "dawncpp_headers"
+ outputs = [
+ "src/include/dawn/dawncpp.h",
+ ]
+}
+
+source_set("dawncpp_headers") {
+ public_deps = [
+ ":dawn_headers",
+ ":dawncpp_headers_gen",
+ ]
+
+ sources = get_target_outputs(":dawncpp_headers_gen")
+ sources += [ "${dawn_root}/src/include/dawn/EnumClassBitmasks.h" ]
+}
+
+###############################################################################
+# Dawn C++ wrapper
###############################################################################
-dawn_json_generator("libdawn_gen") {
- target = "libdawn"
+dawn_json_generator("dawncpp_gen") {
+ target = "dawncpp"
outputs = [
- "dawn/dawncpp.cpp",
- "dawn/dawn.c",
+ "src/dawn/dawncpp.cpp",
+ ]
+}
+
+source_set("dawncpp") {
+ deps = [
+ ":dawncpp_gen",
+ ":dawncpp_headers",
]
+ sources = get_target_outputs(":dawncpp_gen")
}
-dawn_component("libdawn") {
+###############################################################################
+# libdawn_proc
+###############################################################################
+
+dawn_json_generator("libdawn_proc_gen") {
+ target = "dawn_proc"
+ outputs = [
+ "src/dawn/dawn_proc.c",
+ ]
+}
+
+dawn_component("libdawn_proc") {
DEFINE_PREFIX = "DAWN"
public_deps = [
":dawn_headers",
]
-
deps = [
- ":libdawn_gen",
+ ":libdawn_proc_gen",
]
- sources = get_target_outputs(":libdawn_gen")
+ sources = get_target_outputs(":libdawn_proc_gen")
+ sources += [ "${dawn_root}/src/include/dawn/dawn_proc.h" ]
}
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
index d21b01885f9..3cd6621404d 100644
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
@@ -34,9 +34,8 @@ namespace dawn_native {
AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
: mSampleCount(descriptor->sampleCount) {
for (uint32_t i = 0; i < descriptor->colorStateCount; ++i) {
- ASSERT(descriptor->colorStates[i] != nullptr);
mColorAttachmentsSet.set(i);
- mColorFormats[i] = descriptor->colorStates[i]->format;
+ mColorFormats[i] = descriptor->colorStates[i].format;
}
if (descriptor->depthStencilState != nullptr) {
mDepthStencilFormat = descriptor->depthStencilState->format;
@@ -45,7 +44,7 @@ namespace dawn_native {
AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
- TextureViewBase* attachment = descriptor->colorAttachments[i]->attachment;
+ TextureViewBase* attachment = descriptor->colorAttachments[i].attachment;
mColorAttachmentsSet.set(i);
mColorFormats[i] = attachment->GetFormat().format;
if (mSampleCount == 0) {
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
index 15c971f5357..ceaf913b9d4 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
@@ -65,7 +65,8 @@ namespace dawn_native {
const BindGroupBinding& binding,
dawn::TextureUsage requiredUsage,
bool multisampledBinding,
- dawn::TextureComponentType requiredComponentType) {
+ dawn::TextureComponentType requiredComponentType,
+ dawn::TextureViewDimension requiredDimension) {
if (binding.textureView == nullptr || binding.sampler != nullptr ||
binding.buffer != nullptr) {
return DAWN_VALIDATION_ERROR("expected texture binding");
@@ -86,6 +87,10 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("texture component type usage mismatch");
}
+ if (binding.textureView->GetDimension() != requiredDimension) {
+ return DAWN_VALIDATION_ERROR("texture view dimension mismatch");
+ }
+
return {};
}
@@ -145,10 +150,10 @@ namespace dawn_native {
DAWN_TRY(ValidateBufferBinding(device, binding, dawn::BufferUsage::Storage));
break;
case dawn::BindingType::SampledTexture:
- DAWN_TRY(
- ValidateTextureBinding(device, binding, dawn::TextureUsage::Sampled,
- layoutInfo.multisampled[bindingIndex],
- layoutInfo.textureComponentTypes[bindingIndex]));
+ DAWN_TRY(ValidateTextureBinding(device, binding, dawn::TextureUsage::Sampled,
+ layoutInfo.multisampled[bindingIndex],
+ layoutInfo.textureComponentTypes[bindingIndex],
+ layoutInfo.textureDimensions[bindingIndex]));
break;
case dawn::BindingType::Sampler:
DAWN_TRY(ValidateSamplerBinding(device, binding));
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
index 57bc2ecf033..2001b6be70c 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
@@ -38,6 +38,10 @@ namespace dawn_native {
DAWN_TRY(ValidateBindingType(binding.type));
DAWN_TRY(ValidateTextureComponentType(binding.textureComponentType));
+ if (binding.textureDimension != dawn::TextureViewDimension::Undefined) {
+ DAWN_TRY(ValidateTextureViewDimension(binding.textureDimension));
+ }
+
if (binding.binding >= kMaxBindingsPerGroup) {
return DAWN_VALIDATION_ERROR("some binding index exceeds the maximum value");
}
@@ -45,24 +49,20 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("some binding index was specified more than once");
}
- if (binding.visibility == dawn::ShaderStage::None) {
- return DAWN_VALIDATION_ERROR("Visibility of bindings can't be None");
- }
-
switch (binding.type) {
case dawn::BindingType::UniformBuffer:
- if (binding.dynamic) {
+ if (binding.hasDynamicOffset) {
++dynamicUniformBufferCount;
}
break;
case dawn::BindingType::StorageBuffer:
- if (binding.dynamic) {
+ if (binding.hasDynamicOffset) {
++dynamicStorageBufferCount;
}
break;
case dawn::BindingType::SampledTexture:
case dawn::BindingType::Sampler:
- if (binding.dynamic) {
+ if (binding.hasDynamicOffset) {
return DAWN_VALIDATION_ERROR("Samplers and textures cannot be dynamic");
}
break;
@@ -96,11 +96,11 @@ namespace dawn_native {
namespace {
size_t HashBindingInfo(const BindGroupLayoutBase::LayoutBindingInfo& info) {
size_t hash = Hash(info.mask);
- HashCombine(&hash, info.dynamic, info.multisampled);
+ HashCombine(&hash, info.hasDynamicOffset, info.multisampled);
for (uint32_t binding : IterateBitSet(info.mask)) {
HashCombine(&hash, info.visibilities[binding], info.types[binding],
- info.textureComponentTypes[binding]);
+ info.textureComponentTypes[binding], info.textureDimensions[binding]);
}
return hash;
@@ -108,14 +108,16 @@ namespace dawn_native {
bool operator==(const BindGroupLayoutBase::LayoutBindingInfo& a,
const BindGroupLayoutBase::LayoutBindingInfo& b) {
- if (a.mask != b.mask || a.dynamic != b.dynamic || a.multisampled != b.multisampled) {
+ if (a.mask != b.mask || a.hasDynamicOffset != b.hasDynamicOffset ||
+ a.multisampled != b.multisampled) {
return false;
}
for (uint32_t binding : IterateBitSet(a.mask)) {
if ((a.visibilities[binding] != b.visibilities[binding]) ||
(a.types[binding] != b.types[binding]) ||
- (a.textureComponentTypes[binding] != b.textureComponentTypes[binding])) {
+ (a.textureComponentTypes[binding] != b.textureComponentTypes[binding]) ||
+ (a.textureDimensions[binding] != b.textureDimensions[binding])) {
return false;
}
}
@@ -138,8 +140,13 @@ namespace dawn_native {
mBindingInfo.types[index] = binding.type;
mBindingInfo.textureComponentTypes[index] = binding.textureComponentType;
- if (binding.dynamic) {
- mBindingInfo.dynamic.set(index);
+ if (binding.textureDimension == dawn::TextureViewDimension::Undefined) {
+ mBindingInfo.textureDimensions[index] = dawn::TextureViewDimension::e2D;
+ } else {
+ mBindingInfo.textureDimensions[index] = binding.textureDimension;
+ }
+ if (binding.hasDynamicOffset) {
+ mBindingInfo.hasDynamicOffset.set(index);
switch (binding.type) {
case dawn::BindingType::UniformBuffer:
++mDynamicUniformBufferCount;
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
index 1bb747184e0..c241cabb25d 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
@@ -43,7 +43,8 @@ namespace dawn_native {
std::array<dawn::ShaderStage, kMaxBindingsPerGroup> visibilities;
std::array<dawn::BindingType, kMaxBindingsPerGroup> types;
std::array<dawn::TextureComponentType, kMaxBindingsPerGroup> textureComponentTypes;
- std::bitset<kMaxBindingsPerGroup> dynamic;
+ std::array<dawn::TextureViewDimension, kMaxBindingsPerGroup> textureDimensions;
+ std::bitset<kMaxBindingsPerGroup> hasDynamicOffset;
std::bitset<kMaxBindingsPerGroup> multisampled;
std::bitset<kMaxBindingsPerGroup> mask;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h b/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
new file mode 100644
index 00000000000..da992a6d7f1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
@@ -0,0 +1,138 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDGROUPTRACKER_H_
+#define DAWNNATIVE_BINDGROUPTRACKER_H_
+
+#include "common/Constants.h"
+#include "dawn_native/BindGroupLayout.h"
+#include "dawn_native/Pipeline.h"
+#include "dawn_native/PipelineLayout.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn_native {
+
+ // Keeps track of the dirty bind groups so they can be lazily applied when we know the
+ // pipeline state or it changes.
+ // |BindGroup| is a template parameter so a backend may provide its backend-specific
+ // type or native handle.
+ // |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
+ // in other backends.
+ template <typename BindGroup, bool CanInheritBindGroups, typename DynamicOffset = uint64_t>
+ class BindGroupTrackerBase {
+ public:
+ void OnSetBindGroup(uint32_t index,
+ BindGroup bindGroup,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets) {
+ ASSERT(index < kMaxBindGroups);
+
+ if (mBindGroupLayoutsMask[index]) {
+ // It is okay to only dirty bind groups that are used by the current pipeline
+ // layout. If the pipeline layout changes, then the bind groups it uses will
+ // become dirty.
+
+ if (mBindGroups[index] != bindGroup) {
+ mDirtyBindGroups.set(index);
+ mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
+ }
+
+ if (dynamicOffsetCount > 0) {
+ mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
+ }
+ }
+
+ mBindGroups[index] = bindGroup;
+ mDynamicOffsetCounts[index] = dynamicOffsetCount;
+ SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
+ }
+
+ void OnSetPipeline(PipelineBase* pipeline) {
+ mPipelineLayout = pipeline->GetLayout();
+ if (mLastAppliedPipelineLayout == mPipelineLayout) {
+ return;
+ }
+
+ // Keep track of the bind group layout mask to avoid marking unused bind groups as
+ // dirty. This also allows us to avoid computing the intersection of the dirty bind
+ // groups and bind group layout mask in Draw or Dispatch which is very hot code.
+ mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
+
+ // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
+ // the first |k| matching bind groups may be inherited.
+ if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
+ // Dirty bind groups that cannot be inherited.
+ std::bitset<kMaxBindGroups> dirtiedGroups =
+ ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
+
+ mDirtyBindGroups |= dirtiedGroups;
+ mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
+
+ // Clear any bind groups not in the mask.
+ mDirtyBindGroups &= mBindGroupLayoutsMask;
+ mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
+ } else {
+ mDirtyBindGroups = mBindGroupLayoutsMask;
+ mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
+ }
+ }
+
+ protected:
+ // The Derived class should call this when it applies bind groups.
+ void DidApply() {
+ // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
+ // will be dirtied again by the next pipeline change.
+ mDirtyBindGroups.reset();
+ mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
+ mLastAppliedPipelineLayout = mPipelineLayout;
+ }
+
+ std::bitset<kMaxBindGroups> mDirtyBindGroups = 0;
+ std::bitset<kMaxBindGroups> mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
+ std::bitset<kMaxBindGroups> mBindGroupLayoutsMask = 0;
+ std::array<BindGroup, kMaxBindGroups> mBindGroups = {};
+ std::array<uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
+ std::array<std::array<DynamicOffset, kMaxBindingsPerGroup>, kMaxBindGroups>
+ mDynamicOffsets = {};
+
+ // |mPipelineLayout| is the current pipeline layout set on the command buffer.
+ // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
+ // to the bind group bindings.
+ PipelineLayoutBase* mPipelineLayout = nullptr;
+ PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
+
+ private:
+        // The Vulkan backend uses uint32_t as its dynamic offset type (Vulkan arguably
+        // should have used VkDeviceSize), so the 64-bit offsets are narrowed there.
+ static void SetDynamicOffsets(uint32_t* data,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets) {
+ for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
+ ASSERT(dynamicOffsets[i] <= std::numeric_limits<uint32_t>::max());
+ data[i] = static_cast<uint32_t>(dynamicOffsets[i]);
+ }
+ }
+
+ static void SetDynamicOffsets(uint64_t* data,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets) {
+ memcpy(data, dynamicOffsets, sizeof(uint64_t) * dynamicOffsetCount);
+ }
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_BINDGROUPTRACKER_H_
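
A sketch of how a backend might build on the tracker added above, assuming Dawn's usual IterateBitSet helper from common/BitSetIterator.h; FakeBindGroupTracker and its Apply() method are illustrative, not code from any real backend in this patch.

    #include "common/BitSetIterator.h"
    #include "dawn_native/BindGroup.h"
    #include "dawn_native/BindGroupTracker.h"

    namespace dawn_native {

        class FakeBindGroupTracker
            : public BindGroupTrackerBase<BindGroupBase*, /*CanInheritBindGroups=*/true> {
          public:
            void Apply() {
                // Only re-bind groups that are dirty; groups unused by the current layout were
                // already masked out in OnSetPipeline, so no extra intersection is needed here.
                for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
                    // A real backend would bind mBindGroups[index] here, passing
                    // mDynamicOffsetCounts[index] offsets taken from mDynamicOffsets[index].
                }
                DidApply();  // Clears the dirty bits and records the applied pipeline layout.
            }
        };

    }  // namespace dawn_native
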
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.cpp
index a96dd883181..38b2dd8e456 100644
--- a/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.cpp
@@ -91,7 +91,7 @@ namespace dawn_native {
break;
}
}
- return INVALID_OFFSET; // No free block exists at any level.
+ return kInvalidOffset; // No free block exists at any level.
}
// Inserts existing free block into the free-list.
@@ -141,7 +141,7 @@ namespace dawn_native {
uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
- return INVALID_OFFSET;
+ return kInvalidOffset;
}
// Compute the level
@@ -152,8 +152,8 @@ namespace dawn_native {
uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
// Error when no free blocks exist (allocator is full)
- if (currBlockLevel == INVALID_OFFSET) {
- return INVALID_OFFSET;
+ if (currBlockLevel == kInvalidOffset) {
+ return kInvalidOffset;
}
// Split free blocks level-by-level.
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.h b/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.h
index df689f3b3db..af826362688 100644
--- a/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.h
@@ -16,13 +16,13 @@
#define DAWNNATIVE_BUDDYALLOCATOR_H_
#include <cstddef>
+#include <cstdint>
+#include <limits>
#include <vector>
namespace dawn_native {
- static constexpr uint64_t INVALID_OFFSET = std::numeric_limits<uint64_t>::max();
-
- // Buddy allocator uses the buddy memory allocation technique to satisify an allocation request.
+ // Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
// Memory is split into halves until just large enough to fit to the request. This
// requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
// returning the starting offset whose size is guaranteed to be greater than or equal to the
@@ -45,6 +45,8 @@ namespace dawn_native {
// For testing purposes only.
uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
+ static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+
private:
uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
@@ -108,4 +110,4 @@ namespace dawn_native {
} // namespace dawn_native
-#endif // DAWNNATIVE_BUDDYALLOCATOR_H_
\ No newline at end of file
+#endif // DAWNNATIVE_BUDDYALLOCATOR_H_
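
A minimal usage sketch of the block allocator whose header is shown above, with arbitrary example sizes; in this patch Dawn only drives it indirectly through BuddyMemoryAllocator below.

    #include "dawn_native/BuddyAllocator.h"

    void BuddyAllocatorExample() {
        // A buddy system managing a 256-byte address space.
        dawn_native::BuddyAllocator allocator(256);

        // Power-of-two requests are served by splitting larger free blocks in half
        // until one just fits the request.
        uint64_t a = allocator.Allocate(64, /*alignment*/ 1);
        uint64_t b = allocator.Allocate(64, /*alignment*/ 1);

        if (b != dawn_native::BuddyAllocator::kInvalidOffset) {
            // Freeing a block merges it back with its buddy when the buddy is also free.
            allocator.Deallocate(b);
        }
        allocator.Deallocate(a);
    }
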
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
new file mode 100644
index 00000000000..87f4743e1ff
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
@@ -0,0 +1,104 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "dawn_native/BuddyMemoryAllocator.h"
+
+#include "common/Math.h"
+
+namespace dawn_native {
+
+ BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxBlockSize,
+ uint64_t memorySize,
+ std::unique_ptr<MemoryAllocator> client)
+ : mMemorySize(memorySize), mBuddyBlockAllocator(maxBlockSize), mClient(std::move(client)) {
+ ASSERT(memorySize <= maxBlockSize);
+ ASSERT(IsPowerOfTwo(mMemorySize));
+ ASSERT(maxBlockSize % mMemorySize == 0);
+
+ mTrackedSubAllocations.resize(maxBlockSize / mMemorySize);
+ }
+
+ uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
+ ASSERT(offset != BuddyAllocator::kInvalidOffset);
+ return offset / mMemorySize;
+ }
+
+ ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
+ uint64_t alignment,
+ int memoryFlags) {
+ ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
+
+ // Allocation cannot exceed the memory size.
+ if (allocationSize == 0 || allocationSize > mMemorySize) {
+ return invalidAllocation;
+ }
+
+ // Attempt to sub-allocate a block of the requested size.
+ const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
+ if (blockOffset == BuddyAllocator::kInvalidOffset) {
+ return invalidAllocation;
+ }
+
+ const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
+ if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+ // Transfer ownership to this allocator
+ std::unique_ptr<ResourceHeapBase> memory;
+ DAWN_TRY_ASSIGN(memory, mClient->Allocate(mMemorySize, memoryFlags));
+ mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
+ }
+
+ mTrackedSubAllocations[memoryIndex].refcount++;
+
+ AllocationInfo info;
+ info.mBlockOffset = blockOffset;
+ info.mMethod = AllocationMethod::kSubAllocated;
+
+ // Allocation offset is always local to the memory.
+ const uint64_t memoryOffset = blockOffset % mMemorySize;
+
+ return ResourceMemoryAllocation{
+ info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
+ } // namespace dawn_native
+
+ void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
+ const AllocationInfo info = allocation.GetInfo();
+
+ ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
+
+ const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
+
+ ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
+
+ mTrackedSubAllocations[memoryIndex].refcount--;
+
+ if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+ mClient->Deallocate(std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
+ }
+
+ mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
+ }
+
+ uint64_t BuddyMemoryAllocator::GetMemorySize() const {
+ return mMemorySize;
+ }
+
+ uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
+ uint64_t count = 0;
+ for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
+ if (allocation.refcount > 0) {
+ count++;
+ }
+ }
+ return count;
+ }
+} // namespace dawn_native
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h
new file mode 100644
index 00000000000..b31b40074b4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h
@@ -0,0 +1,73 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
+#define DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
+
+#include <vector>
+
+#include "dawn_native/BuddyAllocator.h"
+#include "dawn_native/MemoryAllocator.h"
+#include "dawn_native/ResourceMemoryAllocation.h"
+
+namespace dawn_native {
+ // BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
+ // memory created by MemoryAllocator clients. It creates a very large buddy system
+ // where backing device memory blocks equal a specified level in the system.
+ //
+ // Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
+ // memory index and should the memory not exist, it is created. If two sub-allocations share the
+ // same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
+ // release the other prematurely.
+ //
+ // The device will only create up to Log2(kMaxResourceSize) allocators and can prefer speed
+ // over memory footprint by selecting an allocator with a higher memory threshold which results
+ // in pre-allocating more memory.
+ //
+ // The resource allocation is guaranteed by the device to have compatible memory flags.
+ class BuddyMemoryAllocator {
+ public:
+ BuddyMemoryAllocator(uint64_t maxBlockSize,
+ uint64_t memorySize,
+ std::unique_ptr<MemoryAllocator> client);
+ ~BuddyMemoryAllocator() = default;
+
+ ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
+ uint64_t alignment,
+ int memoryFlags = 0);
+ void Deallocate(const ResourceMemoryAllocation& allocation);
+
+ uint64_t GetMemorySize() const;
+
+ // For testing purposes.
+ uint64_t ComputeTotalNumOfHeapsForTesting() const;
+
+ private:
+ uint64_t GetMemoryIndex(uint64_t offset) const;
+
+ uint64_t mMemorySize = 0;
+
+ BuddyAllocator mBuddyBlockAllocator;
+ std::unique_ptr<MemoryAllocator> mClient;
+
+ struct TrackedSubAllocations {
+ size_t refcount = 0;
+ std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
+ };
+
+ std::vector<TrackedSubAllocations> mTrackedSubAllocations;
+ };
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
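
A worked example of the offset-to-heap mapping described in the header comment above, matching the division and modulo used by GetMemoryIndex and Allocate in BuddyMemoryAllocator.cpp; the sizes are illustrative numbers, not values taken from this patch.

    #include <cassert>
    #include <cstdint>

    int main() {
        // Suppose the buddy system spans 16 MB split into 4 MB device memory heaps.
        const uint64_t kMemorySize = 4ull * 1024 * 1024;

        // A sub-allocation whose block offset is 9 MB lands in the third heap (index 2)...
        const uint64_t blockOffset = 9ull * 1024 * 1024;
        const uint64_t memoryIndex = blockOffset / kMemorySize;   // == 2
        // ...and starts 1 MB into that heap, which is what ResourceMemoryAllocation stores.
        const uint64_t memoryOffset = blockOffset % kMemorySize;  // == 1 MB

        assert(memoryIndex == 2);
        assert(memoryOffset == 1ull * 1024 * 1024);
        return 0;
    }
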
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
index bd5b2fb1615..21bc305fa1f 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
@@ -167,9 +167,7 @@ namespace dawn_native {
// error buffer.
// TODO(enga): Suballocate and reuse memory from a larger staging buffer so we don't create
// many small buffers.
- DynamicUploader* uploader = nullptr;
- DAWN_TRY_ASSIGN(uploader, GetDevice()->GetDynamicUploader());
- DAWN_TRY_ASSIGN(mStagingBuffer, uploader->CreateStagingBuffer(GetSize()));
+ DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetSize()));
ASSERT(mStagingBuffer->GetMappedPointer() != nullptr);
*mappedPointer = reinterpret_cast<uint8_t*>(mStagingBuffer->GetMappedPointer());
@@ -252,11 +250,11 @@ namespace dawn_native {
}
MaybeError BufferBase::SetSubDataImpl(uint32_t start, uint32_t count, const void* data) {
- DynamicUploader* uploader = nullptr;
- DAWN_TRY_ASSIGN(uploader, GetDevice()->GetDynamicUploader());
+ DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, uploader->Allocate(count));
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(count, GetDevice()->GetPendingCommandSerial()));
ASSERT(uploadHandle.mappedBuffer != nullptr);
memcpy(uploadHandle.mappedBuffer, data, count);
@@ -311,8 +309,7 @@ namespace dawn_native {
ASSERT(mStagingBuffer);
DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetSize()));
- DynamicUploader* uploader = nullptr;
- DAWN_TRY_ASSIGN(uploader, GetDevice()->GetDynamicUploader());
+ DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
index 26970410939..943b67be75e 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
@@ -152,10 +152,8 @@ namespace dawn_native {
mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
}
- void CommandBufferStateTracker::SetVertexBuffer(uint32_t start, uint32_t count) {
- for (uint32_t i = 0; i < count; ++i) {
- mInputsSet.set(start + i);
- }
+ void CommandBufferStateTracker::SetVertexBuffer(uint32_t slot) {
+ mInputsSet.set(slot);
}
void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
index 2009dd73408..5be9dcae9ef 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
@@ -38,7 +38,7 @@ namespace dawn_native {
void SetRenderPipeline(RenderPipelineBase* pipeline);
void SetBindGroup(uint32_t index, BindGroupBase* bindgroup);
void SetIndexBuffer();
- void SetVertexBuffer(uint32_t start, uint32_t count);
+ void SetVertexBuffer(uint32_t slot);
static constexpr size_t kNumAspects = 4;
using ValidationAspects = std::bitset<kNumAspects>;
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
index 832d27a667e..e0a9cd2e4ce 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
@@ -310,39 +310,40 @@ namespace dawn_native {
MaybeError ValidateResolveTarget(
const DeviceBase* device,
- const RenderPassColorAttachmentDescriptor* colorAttachment) {
- if (colorAttachment->resolveTarget == nullptr) {
+ const RenderPassColorAttachmentDescriptor& colorAttachment) {
+ if (colorAttachment.resolveTarget == nullptr) {
return {};
}
- DAWN_TRY(device->ValidateObject(colorAttachment->resolveTarget));
+ const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
+ const TextureViewBase* attachment = colorAttachment.attachment;
+ DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
- if (!colorAttachment->attachment->GetTexture()->IsMultisampledTexture()) {
+ if (!attachment->GetTexture()->IsMultisampledTexture()) {
return DAWN_VALIDATION_ERROR(
"Cannot set resolve target when the sample count of the color attachment is 1");
}
- if (colorAttachment->resolveTarget->GetTexture()->IsMultisampledTexture()) {
+ if (resolveTarget->GetTexture()->IsMultisampledTexture()) {
return DAWN_VALIDATION_ERROR("Cannot use multisampled texture as resolve target");
}
- if (colorAttachment->resolveTarget->GetLayerCount() > 1) {
+ if (resolveTarget->GetLayerCount() > 1) {
return DAWN_VALIDATION_ERROR(
"The array layer count of the resolve target must be 1");
}
- if (colorAttachment->resolveTarget->GetLevelCount() > 1) {
+ if (resolveTarget->GetLevelCount() > 1) {
return DAWN_VALIDATION_ERROR("The mip level count of the resolve target must be 1");
}
- uint32_t colorAttachmentBaseMipLevel = colorAttachment->attachment->GetBaseMipLevel();
- const Extent3D& colorTextureSize = colorAttachment->attachment->GetTexture()->GetSize();
+ uint32_t colorAttachmentBaseMipLevel = attachment->GetBaseMipLevel();
+ const Extent3D& colorTextureSize = attachment->GetTexture()->GetSize();
uint32_t colorAttachmentWidth = colorTextureSize.width >> colorAttachmentBaseMipLevel;
uint32_t colorAttachmentHeight = colorTextureSize.height >> colorAttachmentBaseMipLevel;
- uint32_t resolveTargetBaseMipLevel = colorAttachment->resolveTarget->GetBaseMipLevel();
- const Extent3D& resolveTextureSize =
- colorAttachment->resolveTarget->GetTexture()->GetSize();
+ uint32_t resolveTargetBaseMipLevel = resolveTarget->GetBaseMipLevel();
+ const Extent3D& resolveTextureSize = resolveTarget->GetTexture()->GetSize();
uint32_t resolveTargetWidth = resolveTextureSize.width >> resolveTargetBaseMipLevel;
uint32_t resolveTargetHeight = resolveTextureSize.height >> resolveTargetBaseMipLevel;
if (colorAttachmentWidth != resolveTargetWidth ||
@@ -351,9 +352,8 @@ namespace dawn_native {
"The size of the resolve target must be the same as the color attachment");
}
- dawn::TextureFormat resolveTargetFormat =
- colorAttachment->resolveTarget->GetFormat().format;
- if (resolveTargetFormat != colorAttachment->attachment->GetFormat().format) {
+ dawn::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
+ if (resolveTargetFormat != attachment->GetFormat().format) {
return DAWN_VALIDATION_ERROR(
"The format of the resolve target must be the same as the color attachment");
}
@@ -363,15 +363,13 @@ namespace dawn_native {
MaybeError ValidateRenderPassColorAttachment(
const DeviceBase* device,
- const RenderPassColorAttachmentDescriptor* colorAttachment,
+ const RenderPassColorAttachmentDescriptor& colorAttachment,
uint32_t* width,
uint32_t* height,
uint32_t* sampleCount) {
- DAWN_ASSERT(colorAttachment != nullptr);
+ DAWN_TRY(device->ValidateObject(colorAttachment.attachment));
- DAWN_TRY(device->ValidateObject(colorAttachment->attachment));
-
- const TextureViewBase* attachment = colorAttachment->attachment;
+ const TextureViewBase* attachment = colorAttachment.attachment;
if (!attachment->GetFormat().IsColor() || !attachment->GetFormat().isRenderable) {
return DAWN_VALIDATION_ERROR(
"The format of the texture view used as color attachment is not color "
@@ -406,6 +404,12 @@ namespace dawn_native {
"depth stencil format");
}
+ // This validates that the depth storeOp and stencil storeOps are the same
+ if (depthStencilAttachment->depthStoreOp != depthStencilAttachment->stencilStoreOp) {
+ return DAWN_VALIDATION_ERROR(
+ "The depth storeOp and stencil storeOp are not the same");
+ }
+
// *sampleCount == 0 must only happen when there is no color attachment. In that case we
// do not need to validate the sample count of the depth stencil attachment.
const uint32_t depthStencilSampleCount = attachment->GetTexture()->GetSampleCount();
@@ -517,13 +521,13 @@ namespace dawn_native {
cmd->attachmentState = device->GetOrCreateAttachmentState(descriptor);
for (uint32_t i : IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
- cmd->colorAttachments[i].view = descriptor->colorAttachments[i]->attachment;
+ cmd->colorAttachments[i].view = descriptor->colorAttachments[i].attachment;
cmd->colorAttachments[i].resolveTarget =
- descriptor->colorAttachments[i]->resolveTarget;
- cmd->colorAttachments[i].loadOp = descriptor->colorAttachments[i]->loadOp;
- cmd->colorAttachments[i].storeOp = descriptor->colorAttachments[i]->storeOp;
+ descriptor->colorAttachments[i].resolveTarget;
+ cmd->colorAttachments[i].loadOp = descriptor->colorAttachments[i].loadOp;
+ cmd->colorAttachments[i].storeOp = descriptor->colorAttachments[i].storeOp;
cmd->colorAttachments[i].clearColor =
- descriptor->colorAttachments[i]->clearColor;
+ descriptor->colorAttachments[i].clearColor;
}
if (cmd->attachmentState->HasDepthStencilAttachment()) {
@@ -667,6 +671,40 @@ namespace dawn_native {
});
}
+ void CommandEncoderBase::InsertDebugMarker(const char* groupLabel) {
+ mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ InsertDebugMarkerCmd* cmd =
+ allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+ cmd->length = strlen(groupLabel);
+
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
+
+ return {};
+ });
+ }
+
+ void CommandEncoderBase::PopDebugGroup() {
+ mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+
+ return {};
+ });
+ }
+
+ void CommandEncoderBase::PushDebugGroup(const char* groupLabel) {
+ mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ PushDebugGroupCmd* cmd =
+ allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+ cmd->length = strlen(groupLabel);
+
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
+
+ return {};
+ });
+ }
+
CommandBufferBase* CommandEncoderBase::Finish(const CommandBufferDescriptor* descriptor) {
TRACE_EVENT0(GetDevice()->GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
"CommandEncoderBase::Finish");
@@ -689,6 +727,8 @@ namespace dawn_native {
// encoding context. Subsequent calls to encode commands will generate errors.
DAWN_TRY(mEncodingContext.Finish());
+ uint64_t debugGroupStackSize = 0;
+
CommandIterator* commands = mEncodingContext.GetIterator();
commands->Reset();
@@ -820,11 +860,29 @@ namespace dawn_native {
mResourceUsages.topLevelTextures.insert(copy->destination.texture.Get());
} break;
+ case Command::InsertDebugMarker: {
+ InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
+ commands->NextData<char>(cmd->length + 1);
+ } break;
+
+ case Command::PopDebugGroup: {
+ commands->NextCommand<PopDebugGroupCmd>();
+ DAWN_TRY(ValidateCanPopDebugGroup(debugGroupStackSize));
+ debugGroupStackSize--;
+ } break;
+
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
+ commands->NextData<char>(cmd->length + 1);
+ debugGroupStackSize++;
+ } break;
default:
return DAWN_VALIDATION_ERROR("Command disallowed outside of a pass");
}
}
+ DAWN_TRY(ValidateFinalDebugGroupStackSize(debugGroupStackSize));
+
return {};
}
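
The top-level command encoder now shares the same debug-group accounting as the passes: PushDebugGroup increments a counter, PopDebugGroup is rejected when the counter is zero, and Finish rejects command buffers that leave groups open. A small standalone sketch of that balance rule follows (not Dawn code; the Cmd enum and the driver loop are invented for illustration):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    enum class Cmd { PushDebugGroup, PopDebugGroup, InsertDebugMarker };

    bool ValidateDebugGroupCommands(const std::vector<Cmd>& commands, std::string* error) {
        uint64_t debugGroupStackSize = 0;
        for (Cmd c : commands) {
            switch (c) {
                case Cmd::PushDebugGroup:
                    debugGroupStackSize++;
                    break;
                case Cmd::PopDebugGroup:
                    if (debugGroupStackSize == 0) {  // ValidateCanPopDebugGroup
                        *error = "Pop must be balanced by a corresponding Push.";
                        return false;
                    }
                    debugGroupStackSize--;
                    break;
                case Cmd::InsertDebugMarker:
                    break;  // Markers do not affect the stack.
            }
        }
        if (debugGroupStackSize != 0) {  // ValidateFinalDebugGroupStackSize
            *error = "Each Push must be balanced by a corresponding Pop.";
            return false;
        }
        return true;
    }

    int main() {
        std::string error;
        std::vector<Cmd> ok = {Cmd::PushDebugGroup, Cmd::InsertDebugMarker, Cmd::PopDebugGroup};
        std::vector<Cmd> bad = {Cmd::PopDebugGroup};
        std::cout << ValidateDebugGroupCommands(ok, &error) << "\n";   // 1
        std::cout << ValidateDebugGroupCommands(bad, &error) << "\n";  // 0
        std::cout << error << "\n";
    }
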
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
index bcb11370141..6d39ed3ddc0 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
@@ -38,6 +38,7 @@ namespace dawn_native {
// Dawn API
ComputePassEncoderBase* BeginComputePass(const ComputePassDescriptor* descriptor);
RenderPassEncoderBase* BeginRenderPass(const RenderPassDescriptor* descriptor);
+
void CopyBufferToBuffer(BufferBase* source,
uint64_t sourceOffset,
BufferBase* destination,
@@ -52,6 +53,11 @@ namespace dawn_native {
void CopyTextureToTexture(const TextureCopyView* source,
const TextureCopyView* destination,
const Extent3D* copySize);
+
+ void InsertDebugMarker(const char* groupLabel);
+ void PopDebugGroup();
+ void PushDebugGroup(const char* groupLabel);
+
CommandBufferBase* Finish(const CommandBufferDescriptor* descriptor);
private:
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
index 2aa73234923..c249a337899 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
@@ -26,29 +26,6 @@ namespace dawn_native {
namespace {
- inline MaybeError PushDebugMarkerStack(unsigned int* counter) {
- *counter += 1;
- return {};
- }
-
- inline MaybeError PopDebugMarkerStack(unsigned int* counter) {
- if (*counter == 0) {
- return DAWN_VALIDATION_ERROR("Pop must be balanced by a corresponding Push.");
- } else {
- *counter -= 1;
- }
-
- return {};
- }
-
- inline MaybeError ValidateDebugGroups(const unsigned int counter) {
- if (counter != 0) {
- return DAWN_VALIDATION_ERROR("Each Push must be balanced by a corresponding Pop.");
- }
-
- return {};
- }
-
void TrackBindGroupResourceUsage(BindGroupBase* group,
PassResourceUsageTracker* usageTracker) {
const auto& layoutInfo = group->GetLayout()->GetBindingInfo();
@@ -88,7 +65,7 @@ namespace dawn_native {
PassResourceUsageTracker* usageTracker,
CommandBufferStateTracker* commandBufferState,
const AttachmentState* attachmentState,
- unsigned int* debugGroupStackSize,
+ uint64_t* debugGroupStackSize,
const char* disallowedMessage) {
switch (type) {
case Command::Draw: {
@@ -122,13 +99,14 @@ namespace dawn_native {
case Command::PopDebugGroup: {
commands->NextCommand<PopDebugGroupCmd>();
- DAWN_TRY(PopDebugMarkerStack(debugGroupStackSize));
+ DAWN_TRY(ValidateCanPopDebugGroup(*debugGroupStackSize));
+ *debugGroupStackSize -= 1;
} break;
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
- DAWN_TRY(PushDebugMarkerStack(debugGroupStackSize));
+ *debugGroupStackSize += 1;
} break;
case Command::SetRenderPipeline: {
@@ -158,15 +136,11 @@ namespace dawn_native {
commandBufferState->SetIndexBuffer();
} break;
- case Command::SetVertexBuffers: {
- SetVertexBuffersCmd* cmd = commands->NextCommand<SetVertexBuffersCmd>();
- auto buffers = commands->NextData<Ref<BufferBase>>(cmd->count);
- commands->NextData<uint64_t>(cmd->count);
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
- for (uint32_t i = 0; i < cmd->count; ++i) {
- usageTracker->BufferUsedAs(buffers[i].Get(), dawn::BufferUsage::Vertex);
- }
- commandBufferState->SetVertexBuffer(cmd->startSlot, cmd->count);
+ usageTracker->BufferUsedAs(cmd->buffer.Get(), dawn::BufferUsage::Vertex);
+ commandBufferState->SetVertexBuffer(cmd->slot);
} break;
default:
@@ -178,12 +152,26 @@ namespace dawn_native {
} // namespace
+ MaybeError ValidateCanPopDebugGroup(uint64_t debugGroupStackSize) {
+ if (debugGroupStackSize == 0) {
+ return DAWN_VALIDATION_ERROR("Pop must be balanced by a corresponding Push.");
+ }
+ return {};
+ }
+
+ MaybeError ValidateFinalDebugGroupStackSize(uint64_t debugGroupStackSize) {
+ if (debugGroupStackSize != 0) {
+ return DAWN_VALIDATION_ERROR("Each Push must be balanced by a corresponding Pop.");
+ }
+ return {};
+ }
+
MaybeError ValidateRenderBundle(CommandIterator* commands,
const AttachmentState* attachmentState,
PassResourceUsage* resourceUsage) {
PassResourceUsageTracker usageTracker;
CommandBufferStateTracker commandBufferState;
- unsigned int debugGroupStackSize = 0;
+ uint64_t debugGroupStackSize = 0;
Command type;
while (commands->NextCommandId(&type)) {
@@ -192,7 +180,7 @@ namespace dawn_native {
"Command disallowed inside a render bundle"));
}
- DAWN_TRY(ValidateDebugGroups(debugGroupStackSize));
+ DAWN_TRY(ValidateFinalDebugGroupStackSize(debugGroupStackSize));
DAWN_TRY(usageTracker.ValidateRenderPassUsages());
ASSERT(resourceUsage != nullptr);
*resourceUsage = usageTracker.AcquireResourceUsage();
@@ -205,7 +193,7 @@ namespace dawn_native {
std::vector<PassResourceUsage>* perPassResourceUsages) {
PassResourceUsageTracker usageTracker;
CommandBufferStateTracker commandBufferState;
- unsigned int debugGroupStackSize = 0;
+ uint64_t debugGroupStackSize = 0;
// Track usage of the render pass attachments
for (uint32_t i : IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
@@ -231,8 +219,7 @@ namespace dawn_native {
case Command::EndRenderPass: {
commands->NextCommand<EndRenderPassCmd>();
- DAWN_TRY(ValidateDebugGroups(debugGroupStackSize));
-
+ DAWN_TRY(ValidateFinalDebugGroupStackSize(debugGroupStackSize));
DAWN_TRY(usageTracker.ValidateRenderPassUsages());
ASSERT(perPassResourceUsages != nullptr);
perPassResourceUsages->push_back(usageTracker.AcquireResourceUsage());
@@ -299,7 +286,7 @@ namespace dawn_native {
std::vector<PassResourceUsage>* perPassResourceUsages) {
PassResourceUsageTracker usageTracker;
CommandBufferStateTracker commandBufferState;
- unsigned int debugGroupStackSize = 0;
+ uint64_t debugGroupStackSize = 0;
Command type;
while (commands->NextCommandId(&type)) {
@@ -307,8 +294,7 @@ namespace dawn_native {
case Command::EndComputePass: {
commands->NextCommand<EndComputePassCmd>();
- DAWN_TRY(ValidateDebugGroups(debugGroupStackSize));
-
+ DAWN_TRY(ValidateFinalDebugGroupStackSize(debugGroupStackSize));
DAWN_TRY(usageTracker.ValidateComputePassUsages());
ASSERT(perPassResourceUsages != nullptr);
perPassResourceUsages->push_back(usageTracker.AcquireResourceUsage());
@@ -334,13 +320,14 @@ namespace dawn_native {
case Command::PopDebugGroup: {
commands->NextCommand<PopDebugGroupCmd>();
- DAWN_TRY(PopDebugMarkerStack(&debugGroupStackSize));
+ DAWN_TRY(ValidateCanPopDebugGroup(debugGroupStackSize));
+ debugGroupStackSize--;
} break;
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
- DAWN_TRY(PushDebugMarkerStack(&debugGroupStackSize));
+ debugGroupStackSize++;
} break;
case Command::SetComputePipeline: {
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
index c90343c4cdb..b5a14934abd 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
@@ -26,6 +26,9 @@ namespace dawn_native {
struct BeginRenderPassCmd;
struct PassResourceUsage;
+ MaybeError ValidateCanPopDebugGroup(uint64_t debugGroupStackSize);
+ MaybeError ValidateFinalDebugGroupStackSize(uint64_t debugGroupStackSize);
+
MaybeError ValidateRenderBundle(CommandIterator* commands,
const AttachmentState* attachmentState,
PassResourceUsage* resourceUsage);
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.cpp b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
index eb1179103df..0ac1f033b6b 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
@@ -144,14 +144,9 @@ namespace dawn_native {
SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
cmd->~SetIndexBufferCmd();
} break;
- case Command::SetVertexBuffers: {
- SetVertexBuffersCmd* cmd = commands->NextCommand<SetVertexBuffersCmd>();
- auto buffers = commands->NextData<Ref<BufferBase>>(cmd->count);
- for (size_t i = 0; i < cmd->count; ++i) {
- (&buffers[i])->~Ref<BufferBase>();
- }
- commands->NextData<uint64_t>(cmd->count);
- cmd->~SetVertexBuffersCmd();
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
+ cmd->~SetVertexBufferCmd();
} break;
}
}
@@ -267,10 +262,8 @@ namespace dawn_native {
commands->NextCommand<SetIndexBufferCmd>();
break;
- case Command::SetVertexBuffers: {
- auto* cmd = commands->NextCommand<SetVertexBuffersCmd>();
- commands->NextData<Ref<BufferBase>>(cmd->count);
- commands->NextData<uint64_t>(cmd->count);
+ case Command::SetVertexBuffer: {
+ commands->NextCommand<SetVertexBufferCmd>();
} break;
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.h b/chromium/third_party/dawn/src/dawn_native/Commands.h
index 18d834d72b3..eeaf9dc2df5 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.h
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.h
@@ -58,7 +58,7 @@ namespace dawn_native {
SetBlendColor,
SetBindGroup,
SetIndexBuffer,
- SetVertexBuffers,
+ SetVertexBuffer,
};
struct BeginComputePassCmd {};
@@ -220,9 +220,10 @@ namespace dawn_native {
uint64_t offset;
};
- struct SetVertexBuffersCmd {
- uint32_t startSlot;
- uint32_t count;
+ struct SetVertexBufferCmd {
+ uint32_t slot;
+ Ref<BufferBase> buffer;
+ uint64_t offset;
};
// This needs to be called before the CommandIterator is freed so that the Ref<> present in
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
index a1e941b9c3c..6b207066a00 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
@@ -26,8 +26,8 @@ namespace dawn_native {
}
DAWN_TRY(device->ValidateObject(descriptor->layout));
- DAWN_TRY(ValidatePipelineStageDescriptor(device, &descriptor->computeStage,
- descriptor->layout, SingleShaderStage::Compute));
+ DAWN_TRY(ValidateProgrammableStageDescriptor(
+ device, &descriptor->computeStage, descriptor->layout, SingleShaderStage::Compute));
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
index f0c3bc4a41f..e63e3b8efd5 100644
--- a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
@@ -134,4 +134,10 @@ namespace dawn_native {
return deviceBase->GetLazyClearCountForTesting();
}
+ std::vector<const char*> GetProcMapNamesForTestingInternal();
+
+ std::vector<const char*> GetProcMapNamesForTesting() {
+ return GetProcMapNamesForTestingInternal();
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.cpp b/chromium/third_party/dawn/src/dawn_native/Device.cpp
index e667c12477b..bf36778d1ca 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Device.cpp
@@ -24,6 +24,8 @@
#include "dawn_native/ComputePipeline.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/ErrorScope.h"
+#include "dawn_native/ErrorScopeTracker.h"
#include "dawn_native/Fence.h"
#include "dawn_native/FenceSignalTracker.h"
#include "dawn_native/Instance.h"
@@ -35,6 +37,7 @@
#include "dawn_native/ShaderModule.h"
#include "dawn_native/SwapChain.h"
#include "dawn_native/Texture.h"
+#include "dawn_native/ValidationUtils_autogen.h"
#include <unordered_set>
@@ -61,8 +64,11 @@ namespace dawn_native {
// DeviceBase
DeviceBase::DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor)
- : mAdapter(adapter) {
+ : mAdapter(adapter),
+ mRootErrorScope(AcquireRef(new ErrorScope())),
+ mCurrentErrorScope(mRootErrorScope.Get()) {
mCaches = std::make_unique<DeviceBase::Caches>();
+ mErrorScopeTracker = std::make_unique<ErrorScopeTracker>(this);
mFenceSignalTracker = std::make_unique<FenceSignalTracker>(this);
mDynamicUploader = std::make_unique<DynamicUploader>(this);
SetDefaultToggles();
@@ -89,28 +95,54 @@ namespace dawn_native {
}
void DeviceBase::HandleError(dawn::ErrorType type, const char* message) {
- if (mErrorCallback) {
- mErrorCallback(static_cast<DawnErrorType>(type), message, mErrorUserdata);
+ mCurrentErrorScope->HandleError(type, message);
+ }
+
+ void DeviceBase::InjectError(dawn::ErrorType type, const char* message) {
+ if (ConsumedError(ValidateErrorType(type))) {
+ return;
+ }
+ if (DAWN_UNLIKELY(type == dawn::ErrorType::NoError)) {
+ HandleError(dawn::ErrorType::Validation, "Invalid injected error NoError");
+ return;
}
+ HandleError(type, message);
+ }
+
+ void DeviceBase::ConsumeError(ErrorData* error) {
+ ASSERT(error != nullptr);
+ HandleError(error->GetType(), error->GetMessage().c_str());
+ delete error;
}
void DeviceBase::SetUncapturedErrorCallback(dawn::ErrorCallback callback, void* userdata) {
- mErrorCallback = callback;
- mErrorUserdata = userdata;
+ mRootErrorScope->SetCallback(callback, userdata);
}
void DeviceBase::PushErrorScope(dawn::ErrorFilter filter) {
- // TODO(crbug.com/dawn/153): Implement error scopes.
- HandleError(dawn::ErrorType::Validation, "Error scopes not implemented");
+ if (ConsumedError(ValidateErrorFilter(filter))) {
+ return;
+ }
+ mCurrentErrorScope = AcquireRef(new ErrorScope(filter, mCurrentErrorScope.Get()));
}
bool DeviceBase::PopErrorScope(dawn::ErrorCallback callback, void* userdata) {
- // TODO(crbug.com/dawn/153): Implement error scopes.
- HandleError(dawn::ErrorType::Validation, "Error scopes not implemented");
- return false;
+ if (DAWN_UNLIKELY(mCurrentErrorScope.Get() == mRootErrorScope.Get())) {
+ return false;
+ }
+ mCurrentErrorScope->SetCallback(callback, userdata);
+ mCurrentErrorScope = Ref<ErrorScope>(mCurrentErrorScope->GetParent());
+
+ return true;
+ }
+
+ ErrorScope* DeviceBase::GetCurrentErrorScope() {
+ ASSERT(mCurrentErrorScope.Get() != nullptr);
+ return mCurrentErrorScope.Get();
}
MaybeError DeviceBase::ValidateObject(const ObjectBase* object) const {
+ ASSERT(object != nullptr);
if (DAWN_UNLIKELY(object->GetDevice() != this)) {
return DAWN_VALIDATION_ERROR("Object from a different device.");
}
@@ -128,6 +160,10 @@ namespace dawn_native {
return GetAdapter()->GetInstance()->GetPlatform();
}
+ ErrorScopeTracker* DeviceBase::GetErrorScopeTracker() const {
+ return mErrorScopeTracker.get();
+ }
+
FenceSignalTracker* DeviceBase::GetFenceSignalTracker() const {
return mFenceSignalTracker.get();
}
@@ -286,8 +322,7 @@ namespace dawn_native {
return static_cast<AttachmentState*>(*iter);
}
- Ref<AttachmentState> attachmentState = new AttachmentState(this, *blueprint);
- attachmentState->Release();
+ Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
mCaches->attachmentStates.insert(attachmentState.Get());
return attachmentState;
}
@@ -502,13 +537,16 @@ namespace dawn_native {
// Other Device API methods
void DeviceBase::Tick() {
- TickImpl();
+ if (ConsumedError(TickImpl()))
+ return;
+
{
auto deferredResults = std::move(mDeferredCreateBufferMappedAsyncResults);
for (const auto& deferred : deferredResults) {
deferred.callback(deferred.status, deferred.result, deferred.userdata);
}
}
+ mErrorScopeTracker->Tick(GetCompletedCommandSerial());
mFenceSignalTracker->Tick(GetCompletedCommandSerial());
}
@@ -680,16 +718,7 @@ namespace dawn_native {
// Other implementation details
- void DeviceBase::ConsumeError(ErrorData* error) {
- ASSERT(error != nullptr);
- HandleError(error->GetType(), error->GetMessage().c_str());
- delete error;
- }
-
- ResultOrError<DynamicUploader*> DeviceBase::GetDynamicUploader() const {
- if (mDynamicUploader->IsEmpty()) {
- DAWN_TRY(mDynamicUploader->CreateAndAppendBuffer());
- }
+ DynamicUploader* DeviceBase::GetDynamicUploader() const {
return mDynamicUploader.get();
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.h b/chromium/third_party/dawn/src/dawn_native/Device.h
index 60cdbba9026..85ca254e05c 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.h
+++ b/chromium/third_party/dawn/src/dawn_native/Device.h
@@ -35,6 +35,8 @@ namespace dawn_native {
class AdapterBase;
class AttachmentState;
class AttachmentStateBlueprint;
+ class ErrorScope;
+ class ErrorScopeTracker;
class FenceSignalTracker;
class DynamicUploader;
class StagingBufferBase;
@@ -54,11 +56,22 @@ namespace dawn_native {
return false;
}
+ template <typename T>
+ bool ConsumedError(ResultOrError<T> resultOrError, T* result) {
+ if (DAWN_UNLIKELY(resultOrError.IsError())) {
+ ConsumeError(resultOrError.AcquireError());
+ return true;
+ }
+ *result = resultOrError.AcquireSuccess();
+ return false;
+ }
+
MaybeError ValidateObject(const ObjectBase* object) const;
AdapterBase* GetAdapter() const;
dawn_platform::Platform* GetPlatform() const;
+ ErrorScopeTracker* GetErrorScopeTracker() const;
FenceSignalTracker* GetFenceSignalTracker() const;
// Returns the Format corresponding to the dawn::TextureFormat or an error if the format
@@ -78,7 +91,7 @@ namespace dawn_native {
virtual Serial GetCompletedCommandSerial() const = 0;
virtual Serial GetLastSubmittedCommandSerial() const = 0;
virtual Serial GetPendingCommandSerial() const = 0;
- virtual void TickImpl() = 0;
+ virtual MaybeError TickImpl() = 0;
// Many Dawn objects are completely immutable once created which means that if two
// creations are given the same arguments, they can return the same object. Reusing
@@ -146,11 +159,14 @@ namespace dawn_native {
TextureViewBase* CreateTextureView(TextureBase* texture,
const TextureViewDescriptor* descriptor);
+ void InjectError(dawn::ErrorType type, const char* message);
+
void Tick();
void SetUncapturedErrorCallback(dawn::ErrorCallback callback, void* userdata);
void PushErrorScope(dawn::ErrorFilter filter);
bool PopErrorScope(dawn::ErrorCallback callback, void* userdata);
+ ErrorScope* GetCurrentErrorScope();
void Reference();
void Release();
@@ -163,7 +179,7 @@ namespace dawn_native {
uint64_t destinationOffset,
uint64_t size) = 0;
- ResultOrError<DynamicUploader*> GetDynamicUploader() const;
+ DynamicUploader* GetDynamicUploader() const;
std::vector<const char*> GetEnabledExtensions() const;
std::vector<const char*> GetTogglesUsed() const;
@@ -230,11 +246,15 @@ namespace dawn_native {
void ApplyExtensions(const DeviceDescriptor* deviceDescriptor);
- void ConsumeError(ErrorData* error);
void SetDefaultToggles();
+ void ConsumeError(ErrorData* error);
+
AdapterBase* mAdapter = nullptr;
+ Ref<ErrorScope> mRootErrorScope;
+ Ref<ErrorScope> mCurrentErrorScope;
+
// The object caches aren't exposed in the header as they would require a lot of
// additional includes.
struct Caches;
@@ -247,11 +267,10 @@ namespace dawn_native {
void* userdata;
};
+ std::unique_ptr<ErrorScopeTracker> mErrorScopeTracker;
std::unique_ptr<FenceSignalTracker> mFenceSignalTracker;
std::vector<DeferredCreateBufferMappedAsync> mDeferredCreateBufferMappedAsyncResults;
- dawn::ErrorCallback mErrorCallback = nullptr;
- void* mErrorUserdata = 0;
uint32_t mRefCount = 1;
FormatTable mFormatTable;
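
The new ConsumedError overload unwraps a ResultOrError<T> in place: on success the value lands in *result, on failure the error is routed to the current error scope and the caller simply bails out. A self-contained sketch of that pattern, using a simplified SimpleResult type instead of Dawn's ResultOrError (all names below are illustrative):

    #include <iostream>
    #include <string>
    #include <utility>

    // Simplified stand-in for a ResultOrError<T>: holds either a value or an error message.
    template <typename T>
    class SimpleResult {
      public:
        static SimpleResult Success(T value) { return SimpleResult(std::move(value), ""); }
        static SimpleResult Error(std::string message) { return SimpleResult(T{}, std::move(message)); }

        bool IsError() const { return !mMessage.empty(); }
        T AcquireSuccess() { return std::move(mValue); }
        std::string AcquireError() { return std::move(mMessage); }

      private:
        SimpleResult(T value, std::string message)
            : mValue(std::move(value)), mMessage(std::move(message)) {}
        T mValue;
        std::string mMessage;
    };

    // Mirrors the two-argument ConsumedError: returns true (and reports) on error,
    // otherwise moves the success value into *result.
    template <typename T>
    bool ConsumedError(SimpleResult<T> resultOrError, T* result) {
        if (resultOrError.IsError()) {
            std::cerr << "device error: " << resultOrError.AcquireError() << "\n";
            return true;
        }
        *result = resultOrError.AcquireSuccess();
        return false;
    }

    int main() {
        int value = 0;
        if (!ConsumedError(SimpleResult<int>::Success(42), &value)) {
            std::cout << "got " << value << "\n";  // got 42
        }
        if (ConsumedError(SimpleResult<int>::Error("out of memory"), &value)) {
            std::cout << "error was consumed, caller keeps going\n";
        }
    }
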
diff --git a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
index c7163324290..876b7896689 100644
--- a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
@@ -18,15 +18,9 @@
namespace dawn_native {
- DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
- }
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> DynamicUploader::CreateStagingBuffer(
- size_t size) {
- std::unique_ptr<StagingBufferBase> stagingBuffer;
- DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(size));
- DAWN_TRY(stagingBuffer->Initialize());
- return stagingBuffer;
+ DynamicUploader::DynamicUploader(DeviceBase* device, size_t size) : mDevice(device) {
+ mRingBuffers.emplace_back(
+ std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, RingBufferAllocator(size)}));
}
void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
@@ -34,73 +28,78 @@ namespace dawn_native {
mDevice->GetPendingCommandSerial());
}
- MaybeError DynamicUploader::CreateAndAppendBuffer(size_t size) {
- std::unique_ptr<RingBuffer> ringBuffer = std::make_unique<RingBuffer>(mDevice, size);
- DAWN_TRY(ringBuffer->Initialize());
- mRingBuffers.emplace_back(std::move(ringBuffer));
- return {};
- }
-
- ResultOrError<UploadHandle> DynamicUploader::Allocate(uint32_t size) {
+ ResultOrError<UploadHandle> DynamicUploader::Allocate(size_t allocationSize, Serial serial) {
// Note: Validation ensures size is already aligned.
// First-fit: find next smallest buffer large enough to satisfy the allocation request.
- RingBuffer* targetRingBuffer = GetLargestBuffer();
+ RingBuffer* targetRingBuffer = mRingBuffers.back().get();
for (auto& ringBuffer : mRingBuffers) {
+ const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
// Prevent overflow.
- ASSERT(ringBuffer->GetSize() >= ringBuffer->GetUsedSize());
- const size_t remainingSize = ringBuffer->GetSize() - ringBuffer->GetUsedSize();
- if (size <= remainingSize) {
+ ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
+ const size_t remainingSize =
+ ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
+ if (allocationSize <= remainingSize) {
targetRingBuffer = ringBuffer.get();
break;
}
}
- UploadHandle uploadHandle = UploadHandle{};
+ size_t startOffset = RingBufferAllocator::kInvalidOffset;
if (targetRingBuffer != nullptr) {
- uploadHandle = targetRingBuffer->SubAllocate(size);
+ startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
}
// Upon failure, append a newly created (and much larger) ring buffer to fulfill the
// request.
- if (uploadHandle.mappedBuffer == nullptr) {
+ if (startOffset == RingBufferAllocator::kInvalidOffset) {
// Compute the new max size (in powers of two to preserve alignment).
- size_t newMaxSize = targetRingBuffer->GetSize() * 2;
- while (newMaxSize < size) {
+ size_t newMaxSize = targetRingBuffer->mAllocator.GetSize() * 2;
+ while (newMaxSize < allocationSize) {
newMaxSize *= 2;
}
// TODO(bryan.bernhart@intel.com): Fall-back to no sub-allocations should this fail.
- DAWN_TRY(CreateAndAppendBuffer(newMaxSize));
- targetRingBuffer = GetLargestBuffer();
- uploadHandle = targetRingBuffer->SubAllocate(size);
+ mRingBuffers.emplace_back(std::unique_ptr<RingBuffer>(
+ new RingBuffer{nullptr, RingBufferAllocator(newMaxSize)}));
+
+ targetRingBuffer = mRingBuffers.back().get();
+ startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
}
- uploadHandle.stagingBuffer = targetRingBuffer->GetStagingBuffer();
+ ASSERT(startOffset != RingBufferAllocator::kInvalidOffset);
+
+ // Allocate the staging buffer backing the ringbuffer.
+ // Note: the first ringbuffer will be lazily created.
+ if (targetRingBuffer->mStagingBuffer == nullptr) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer;
+ DAWN_TRY_ASSIGN(stagingBuffer,
+ mDevice->CreateStagingBuffer(targetRingBuffer->mAllocator.GetSize()));
+ targetRingBuffer->mStagingBuffer = std::move(stagingBuffer);
+ }
+
+ ASSERT(targetRingBuffer->mStagingBuffer != nullptr);
+
+ UploadHandle uploadHandle;
+ uploadHandle.stagingBuffer = targetRingBuffer->mStagingBuffer.get();
+ uploadHandle.mappedBuffer =
+ static_cast<uint8_t*>(uploadHandle.stagingBuffer->GetMappedPointer()) + startOffset;
+ uploadHandle.startOffset = startOffset;
return uploadHandle;
}
- void DynamicUploader::Tick(Serial lastCompletedSerial) {
+ void DynamicUploader::Deallocate(Serial lastCompletedSerial) {
// Reclaim memory within the ring buffers by ticking (or removing requests no longer
// in-flight).
for (size_t i = 0; i < mRingBuffers.size(); ++i) {
- mRingBuffers[i]->Tick(lastCompletedSerial);
+ mRingBuffers[i]->mAllocator.Deallocate(lastCompletedSerial);
// Never erase the last buffer so as to prevent re-creating smaller buffers
// again. The last buffer is the largest.
- if (mRingBuffers[i]->Empty() && i < mRingBuffers.size() - 1) {
+ if (mRingBuffers[i]->mAllocator.Empty() && i < mRingBuffers.size() - 1) {
mRingBuffers.erase(mRingBuffers.begin() + i);
}
}
mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
}
-
- RingBuffer* DynamicUploader::GetLargestBuffer() {
- ASSERT(!mRingBuffers.empty());
- return mRingBuffers.back().get();
- }
-
- bool DynamicUploader::IsEmpty() const {
- return mRingBuffers.empty();
- }
} // namespace dawn_native \ No newline at end of file
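
DynamicUploader now keeps plain RingBufferAllocator bookkeeping per ring buffer, creates the backing staging buffer lazily on first use, and, when no existing buffer can satisfy a request, appends a new one whose capacity doubles from the current maximum until the request fits. A standalone sketch of that first-fit-then-grow policy (struct names and sizes are made up; no staging buffers are involved here):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Each entry stands in for one RingBuffer{staging buffer, RingBufferAllocator}.
    struct FakeRingBuffer {
        size_t size;      // capacity of the allocator
        size_t usedSize;  // bytes currently in flight
    };

    // First-fit: pick the first buffer with enough free space; otherwise append a new,
    // power-of-two-grown buffer large enough for the request.
    size_t AllocateFrom(std::vector<FakeRingBuffer>* buffers, size_t allocationSize) {
        for (size_t i = 0; i < buffers->size(); ++i) {
            FakeRingBuffer& rb = (*buffers)[i];
            if (allocationSize <= rb.size - rb.usedSize) {
                rb.usedSize += allocationSize;
                return i;
            }
        }
        size_t newMaxSize = buffers->back().size * 2;
        while (newMaxSize < allocationSize) {
            newMaxSize *= 2;
        }
        buffers->push_back({newMaxSize, allocationSize});
        return buffers->size() - 1;
    }

    int main() {
        std::vector<FakeRingBuffer> buffers = {{64000, 0}};   // kBaseUploadBufferSize-style start
        std::cout << AllocateFrom(&buffers, 1000) << "\n";    // 0: fits in the first buffer
        std::cout << AllocateFrom(&buffers, 100000) << "\n";  // 1: forces a 128000-byte buffer
        std::cout << buffers.back().size << "\n";             // 128000
    }
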
diff --git a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
index 0caecb9919d..f0d4510f153 100644
--- a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
+++ b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
@@ -16,37 +16,42 @@
#define DAWNNATIVE_DYNAMICUPLOADER_H_
#include "dawn_native/Forward.h"
-#include "dawn_native/RingBuffer.h"
+#include "dawn_native/RingBufferAllocator.h"
+#include "dawn_native/StagingBuffer.h"
// DynamicUploader is the front-end implementation used to manage multiple ring buffers for upload
// usage.
namespace dawn_native {
+ struct UploadHandle {
+ uint8_t* mappedBuffer = nullptr;
+ size_t startOffset = 0;
+ StagingBufferBase* stagingBuffer = nullptr;
+ };
+
class DynamicUploader {
public:
- DynamicUploader(DeviceBase* device);
+ DynamicUploader(DeviceBase* device, size_t size = kBaseUploadBufferSize);
~DynamicUploader() = default;
- // We add functions to Create/Release StagingBuffers to the DynamicUploader as there's
+ // We add functions to Release StagingBuffers to the DynamicUploader as there's
// currently no place to track the allocated staging buffers such that they're freed after
// pending commands are finished. This should be changed when better resource allocation is
// implemented.
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size);
void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
- ResultOrError<UploadHandle> Allocate(uint32_t size);
- void Tick(Serial lastCompletedSerial);
-
- RingBuffer* GetLargestBuffer();
-
- MaybeError CreateAndAppendBuffer(size_t size = kBaseUploadBufferSize);
-
- bool IsEmpty() const;
+ ResultOrError<UploadHandle> Allocate(size_t allocationSize, Serial serial);
+ void Deallocate(Serial lastCompletedSerial);
private:
// TODO(bryan.bernhart@intel.com): Figure out this value.
static constexpr size_t kBaseUploadBufferSize = 64000;
+ struct RingBuffer {
+ std::unique_ptr<StagingBufferBase> mStagingBuffer;
+ RingBufferAllocator mAllocator;
+ };
+
std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
SerialQueue<std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
DeviceBase* mDevice;
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
new file mode 100644
index 00000000000..1758ef71b3b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
@@ -0,0 +1,115 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/ErrorScope.h"
+
+#include "common/Assert.h"
+
+namespace dawn_native {
+
+ ErrorScope::ErrorScope() = default;
+
+ ErrorScope::ErrorScope(dawn::ErrorFilter errorFilter, ErrorScope* parent)
+ : RefCounted(), mErrorFilter(errorFilter), mParent(parent) {
+ ASSERT(mParent.Get() != nullptr);
+ }
+
+ ErrorScope::~ErrorScope() {
+ if (mCallback == nullptr || IsRoot()) {
+ return;
+ }
+ mCallback(static_cast<DawnErrorType>(mErrorType), mErrorMessage.c_str(), mUserdata);
+ }
+
+ void ErrorScope::SetCallback(dawn::ErrorCallback callback, void* userdata) {
+ mCallback = callback;
+ mUserdata = userdata;
+ }
+
+ ErrorScope* ErrorScope::GetParent() {
+ return mParent.Get();
+ }
+
+ bool ErrorScope::IsRoot() const {
+ return mParent.Get() == nullptr;
+ }
+
+ void ErrorScope::HandleError(dawn::ErrorType type, const char* message) {
+ HandleErrorImpl(this, type, message);
+ }
+
+ // static
+ void ErrorScope::HandleErrorImpl(ErrorScope* scope, dawn::ErrorType type, const char* message) {
+ ErrorScope* currentScope = scope;
+ for (; !currentScope->IsRoot(); currentScope = currentScope->GetParent()) {
+ ASSERT(currentScope != nullptr);
+
+ bool consumed = false;
+ switch (type) {
+ case dawn::ErrorType::Validation:
+ if (currentScope->mErrorFilter != dawn::ErrorFilter::Validation) {
+ // Error filter does not match. Move on to the next scope.
+ continue;
+ }
+ consumed = true;
+ break;
+
+ case dawn::ErrorType::OutOfMemory:
+ if (currentScope->mErrorFilter != dawn::ErrorFilter::OutOfMemory) {
+ // Error filter does not match. Move on to the next scope.
+ continue;
+ }
+ consumed = true;
+ break;
+
+ // Unknown and DeviceLost are fatal. All error scopes capture them.
+ // |consumed| is false because these should bubble to all scopes.
+ case dawn::ErrorType::Unknown:
+ case dawn::ErrorType::DeviceLost:
+ consumed = false;
+ break;
+
+ case dawn::ErrorType::NoError:
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ // Record the error if the scope doesn't have one yet.
+ if (currentScope->mErrorType == dawn::ErrorType::NoError) {
+ currentScope->mErrorType = type;
+ currentScope->mErrorMessage = message;
+ }
+
+ if (consumed) {
+ return;
+ }
+ }
+
+ // The root error scope captures all uncaptured errors.
+ ASSERT(currentScope->IsRoot());
+ if (currentScope->mCallback) {
+ currentScope->mCallback(static_cast<DawnErrorType>(type), message,
+ currentScope->mUserdata);
+ }
+ }
+
+ void ErrorScope::Destroy() {
+ if (!IsRoot()) {
+ mErrorType = dawn::ErrorType::Unknown;
+ mErrorMessage = "Error scope destroyed";
+ }
+ }
+
+} // namespace dawn_native
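
HandleErrorImpl walks from the innermost scope toward the root: a Validation or OutOfMemory error is recorded and consumed by the first scope whose filter matches, Unknown and DeviceLost errors are recorded by every scope but keep bubbling, and whatever reaches the root is reported through the uncaptured-error callback. A minimal standalone model of that walk (the Filter/Type enums and the Scope struct below are stand-ins, not the Dawn types):

    #include <iostream>
    #include <string>
    #include <vector>

    enum class Filter { None, Validation, OutOfMemory };
    enum class Type { NoError, Validation, OutOfMemory, Unknown, DeviceLost };

    struct Scope {
        Filter filter;
        Type recorded = Type::NoError;
        std::string message;
    };

    // scopes.front() is the root (filter None); scopes.back() is the current scope.
    void HandleError(std::vector<Scope>* scopes, Type type, const std::string& message) {
        for (size_t i = scopes->size() - 1; i > 0; --i) {
            Scope& scope = (*scopes)[i];
            bool matches = (type == Type::Validation && scope.filter == Filter::Validation) ||
                           (type == Type::OutOfMemory && scope.filter == Filter::OutOfMemory);
            bool fatal = (type == Type::Unknown || type == Type::DeviceLost);
            if (!matches && !fatal) {
                continue;  // Filter does not match: bubble to the next (outer) scope.
            }
            if (scope.recorded == Type::NoError) {
                scope.recorded = type;  // Record only the first error seen by this scope.
                scope.message = message;
            }
            if (matches) {
                return;  // Consumed; fatal errors keep bubbling instead.
            }
        }
        std::cout << "uncaptured error reaches root callback: " << message << "\n";
    }

    int main() {
        std::vector<Scope> scopes = {{Filter::None}, {Filter::Validation}};
        HandleError(&scopes, Type::OutOfMemory, "heap exhausted");  // root callback fires
        HandleError(&scopes, Type::Validation, "bad bind group");   // consumed by inner scope
        std::cout << scopes[1].message << "\n";                     // bad bind group
    }
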
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScope.h b/chromium/third_party/dawn/src/dawn_native/ErrorScope.h
new file mode 100644
index 00000000000..f3218cb41ef
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorScope.h
@@ -0,0 +1,67 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERRORSCOPE_H_
+#define DAWNNATIVE_ERRORSCOPE_H_
+
+#include "dawn_native/dawn_platform.h"
+
+#include "dawn_native/RefCounted.h"
+
+#include <string>
+
+namespace dawn_native {
+
+ // Errors can be recorded into an ErrorScope by calling |HandleError|.
+ // Because an error scope should not resolve until contained
+ // commands are complete, calling the callback is deferred until it is destructed.
+ // In-flight commands or asynchronous events should hold a reference to the
+ // ErrorScope for their duration.
+ //
+ // Because parent ErrorScopes should not resolve before child ErrorScopes,
+ // ErrorScopes hold a reference to their parent.
+ //
+    // To simplify error handling, there is a sentinel root error scope which has
+ // no parent. All uncaptured errors are handled by the root error scope. Its
+ // callback is called immediately once it encounters an error.
+ class ErrorScope : public RefCounted {
+ public:
+ ErrorScope(); // Constructor for the root error scope.
+ ErrorScope(dawn::ErrorFilter errorFilter, ErrorScope* parent);
+ ~ErrorScope();
+
+ void SetCallback(dawn::ErrorCallback callback, void* userdata);
+ ErrorScope* GetParent();
+
+ void HandleError(dawn::ErrorType type, const char* message);
+
+ void Destroy();
+
+ private:
+ bool IsRoot() const;
+ static void HandleErrorImpl(ErrorScope* scope, dawn::ErrorType type, const char* message);
+
+ dawn::ErrorFilter mErrorFilter = dawn::ErrorFilter::None;
+ Ref<ErrorScope> mParent = nullptr;
+
+ dawn::ErrorCallback mCallback = nullptr;
+ void* mUserdata = nullptr;
+
+ dawn::ErrorType mErrorType = dawn::ErrorType::NoError;
+ std::string mErrorMessage = "";
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_ERRORSCOPE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.cpp
new file mode 100644
index 00000000000..ea5d5a48cd6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.cpp
@@ -0,0 +1,46 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/ErrorScopeTracker.h"
+
+#include "dawn_native/Device.h"
+#include "dawn_native/ErrorScope.h"
+
+#include <limits>
+
+namespace dawn_native {
+
+ ErrorScopeTracker::ErrorScopeTracker(DeviceBase* device) : mDevice(device) {
+ }
+
+ ErrorScopeTracker::~ErrorScopeTracker() {
+ // The tracker is destroyed when the Device is destroyed. We need to
+ // call Destroy on all in-flight error scopes so they resolve their callbacks
+ // with UNKNOWN.
+ constexpr Serial maxSerial = std::numeric_limits<Serial>::max();
+ for (Ref<ErrorScope>& scope : mScopesInFlight.IterateUpTo(maxSerial)) {
+ scope->Destroy();
+ }
+ Tick(maxSerial);
+ }
+
+ void ErrorScopeTracker::TrackUntilLastSubmitComplete(ErrorScope* scope) {
+ mScopesInFlight.Enqueue(scope, mDevice->GetLastSubmittedCommandSerial());
+ }
+
+ void ErrorScopeTracker::Tick(Serial completedSerial) {
+ mScopesInFlight.ClearUpTo(completedSerial);
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.h b/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.h
new file mode 100644
index 00000000000..7337eb64141
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorScopeTracker.h
@@ -0,0 +1,42 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERRORSCOPETRACKER_H_
+#define DAWNNATIVE_ERRORSCOPETRACKER_H_
+
+#include "common/SerialQueue.h"
+#include "dawn_native/RefCounted.h"
+
+namespace dawn_native {
+
+ class DeviceBase;
+ class ErrorScope;
+
+ class ErrorScopeTracker {
+ public:
+ ErrorScopeTracker(DeviceBase* device);
+ ~ErrorScopeTracker();
+
+ void TrackUntilLastSubmitComplete(ErrorScope* scope);
+
+ void Tick(Serial completedSerial);
+
+ protected:
+ DeviceBase* mDevice;
+ SerialQueue<Ref<ErrorScope>> mScopesInFlight;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_ERRORSCOPETRACKER_H_
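
The tracker keeps the device's current error scope alive until the last submitted command serial completes (Queue::Submit and Queue::Signal enqueue it), so its callback only resolves once the GPU catches up; DeviceBase::Tick drives this by calling Tick with the completed serial. A small standalone model of that serial queue (std::multimap stands in for Dawn's SerialQueue, and strings stand in for Ref<ErrorScope>):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    using Serial = uint64_t;

    class FakeErrorScopeTracker {
      public:
        // Keep the scope until the given (last submitted) serial has completed.
        void TrackUntilSerialComplete(const std::string& scope, Serial lastSubmitted) {
            mScopesInFlight.emplace(lastSubmitted, scope);
        }

        // Called with the completed serial: resolve everything the GPU has finished.
        void Tick(Serial completedSerial) {
            auto end = mScopesInFlight.upper_bound(completedSerial);
            for (auto it = mScopesInFlight.begin(); it != end; ++it) {
                std::cout << "resolving scope: " << it->second << "\n";
            }
            mScopesInFlight.erase(mScopesInFlight.begin(), end);
        }

      private:
        std::multimap<Serial, std::string> mScopesInFlight;
    };

    int main() {
        FakeErrorScopeTracker tracker;
        tracker.TrackUntilSerialComplete("scope A", /*lastSubmitted=*/3);
        tracker.TrackUntilSerialComplete("scope B", /*lastSubmitted=*/5);
        tracker.Tick(/*completedSerial=*/3);  // resolves scope A only
        tracker.Tick(/*completedSerial=*/9);  // resolves scope B
    }
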
diff --git a/chromium/third_party/dawn/src/dawn_native/MemoryAllocator.h b/chromium/third_party/dawn/src/dawn_native/MemoryAllocator.h
new file mode 100644
index 00000000000..e932c91cd50
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/MemoryAllocator.h
@@ -0,0 +1,33 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_MEMORYALLOCATOR_H_
+#define DAWNNATIVE_MEMORYALLOCATOR_H_
+
+#include "dawn_native/Error.h"
+#include "dawn_native/ResourceHeap.h"
+
+namespace dawn_native {
+ // Interface for backend allocators that create physical device memory.
+ class MemoryAllocator {
+ public:
+ virtual ~MemoryAllocator() = default;
+
+ virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> Allocate(uint64_t size,
+ int memoryFlags) = 0;
+ virtual void Deallocate(std::unique_ptr<ResourceHeapBase> allocation) = 0;
+ };
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_MEMORYALLOCATOR_H_
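
MemoryAllocator is the backend-facing hook the sub-allocators call when they actually need device memory. A hypothetical implementation might look like the sketch below; FakeResourceHeap, FakeMemoryAllocator and the host-memory storage are invented stand-ins, whereas a real backend would wrap VkDeviceMemory, an ID3D12Heap, and so on:

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <vector>

    // Stand-in for a resource heap: the sketch just owns plain host memory.
    class FakeResourceHeap {
      public:
        explicit FakeResourceHeap(uint64_t size) : mStorage(size) {}
        uint64_t GetSize() const { return mStorage.size(); }

      private:
        std::vector<uint8_t> mStorage;
    };

    // Reduced to what the interface in this patch requires: Allocate and Deallocate.
    class FakeMemoryAllocator {
      public:
        virtual ~FakeMemoryAllocator() = default;
        virtual std::unique_ptr<FakeResourceHeap> Allocate(uint64_t size, int memoryFlags) = 0;
        virtual void Deallocate(std::unique_ptr<FakeResourceHeap> allocation) = 0;
    };

    // "Backend" that allocates host memory so the flow can be exercised end to end.
    class SystemMemoryAllocator : public FakeMemoryAllocator {
      public:
        std::unique_ptr<FakeResourceHeap> Allocate(uint64_t size, int /*memoryFlags*/) override {
            return std::make_unique<FakeResourceHeap>(size);
        }
        void Deallocate(std::unique_ptr<FakeResourceHeap> allocation) override {
            allocation.reset();  // Ownership comes back to the allocator; just free it.
        }
    };

    int main() {
        SystemMemoryAllocator allocator;
        std::unique_ptr<FakeResourceHeap> heap = allocator.Allocate(4 * 1024 * 1024, /*memoryFlags=*/0);
        std::cout << "allocated heap of " << heap->GetSize() << " bytes\n";
        allocator.Deallocate(std::move(heap));
    }
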
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
index 91b2ed090bd..a8c6e9b711a 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
@@ -20,10 +20,10 @@
namespace dawn_native {
- MaybeError ValidatePipelineStageDescriptor(const DeviceBase* device,
- const PipelineStageDescriptor* descriptor,
- const PipelineLayoutBase* layout,
- SingleShaderStage stage) {
+ MaybeError ValidateProgrammableStageDescriptor(const DeviceBase* device,
+ const ProgrammableStageDescriptor* descriptor,
+ const PipelineLayoutBase* layout,
+ SingleShaderStage stage) {
DAWN_TRY(device->ValidateObject(descriptor->module));
if (descriptor->entryPoint != std::string("main")) {
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.h b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
index ea989ed7330..21599548af1 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
@@ -28,10 +28,10 @@
namespace dawn_native {
- MaybeError ValidatePipelineStageDescriptor(const DeviceBase* device,
- const PipelineStageDescriptor* descriptor,
- const PipelineLayoutBase* layout,
- SingleShaderStage stage);
+ MaybeError ValidateProgrammableStageDescriptor(const DeviceBase* device,
+ const ProgrammableStageDescriptor* descriptor,
+ const PipelineLayoutBase* layout,
+ SingleShaderStage stage);
class PipelineBase : public ObjectBase {
public:
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.cpp b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
index 46ead116368..f13079a1372 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
@@ -17,6 +17,8 @@
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ErrorScope.h"
+#include "dawn_native/ErrorScopeTracker.h"
#include "dawn_native/Fence.h"
#include "dawn_native/FenceSignalTracker.h"
#include "dawn_native/Texture.h"
@@ -30,24 +32,31 @@ namespace dawn_native {
}
void QueueBase::Submit(uint32_t commandCount, CommandBufferBase* const* commands) {
- TRACE_EVENT0(GetDevice()->GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "Queue::Submit");
- if (GetDevice()->ConsumedError(ValidateSubmit(commandCount, commands))) {
+ DeviceBase* device = GetDevice();
+ TRACE_EVENT0(device->GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"), "Queue::Submit");
+ if (device->ConsumedError(ValidateSubmit(commandCount, commands))) {
return;
}
ASSERT(!IsError());
- SubmitImpl(commandCount, commands);
+ if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
+ return;
+ }
+ device->GetErrorScopeTracker()->TrackUntilLastSubmitComplete(
+ device->GetCurrentErrorScope());
}
void QueueBase::Signal(FenceBase* fence, uint64_t signalValue) {
- if (GetDevice()->ConsumedError(ValidateSignal(fence, signalValue))) {
+ DeviceBase* device = GetDevice();
+ if (device->ConsumedError(ValidateSignal(fence, signalValue))) {
return;
}
ASSERT(!IsError());
fence->SetSignaledValue(signalValue);
- GetDevice()->GetFenceSignalTracker()->UpdateFenceOnComplete(fence, signalValue);
+ device->GetFenceSignalTracker()->UpdateFenceOnComplete(fence, signalValue);
+ device->GetErrorScopeTracker()->TrackUntilLastSubmitComplete(
+ device->GetCurrentErrorScope());
}
FenceBase* QueueBase::CreateFence(const FenceDescriptor* descriptor) {
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.h b/chromium/third_party/dawn/src/dawn_native/Queue.h
index 7b1031eab5a..5d94b3ba9b4 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.h
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.h
@@ -33,7 +33,8 @@ namespace dawn_native {
FenceBase* CreateFence(const FenceDescriptor* descriptor);
private:
- virtual void SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) = 0;
+ virtual MaybeError SubmitImpl(uint32_t commandCount,
+ CommandBufferBase* const* commands) = 0;
MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands);
MaybeError ValidateSignal(const FenceBase* fence, uint64_t signalValue);
diff --git a/chromium/third_party/dawn/src/dawn_native/RefCounted.h b/chromium/third_party/dawn/src/dawn_native/RefCounted.h
index 6eb4ab0b61e..89b0666fedb 100644
--- a/chromium/third_party/dawn/src/dawn_native/RefCounted.h
+++ b/chromium/third_party/dawn/src/dawn_native/RefCounted.h
@@ -104,6 +104,12 @@ namespace dawn_native {
return mPointee;
}
+ T* Detach() {
+ T* pointee = mPointee;
+ mPointee = nullptr;
+ return pointee;
+ }
+
private:
void Reference() const {
if (mPointee != nullptr) {
@@ -120,6 +126,13 @@ namespace dawn_native {
T* mPointee = nullptr;
};
+ template <typename T>
+ Ref<T> AcquireRef(T* pointee) {
+ Ref<T> ref(pointee);
+ ref->Release();
+ return ref;
+ }
+
} // namespace dawn_native
#endif // DAWNNATIVE_REFCOUNTED_H_
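
AcquireRef adopts a pointer that already carries its creation reference, so the resulting Ref is the sole owner; this is why the GetOrCreateAttachmentState change earlier in this patch no longer needs the manual Release() call. The standalone sketch below spells out the refcount arithmetic with a toy counted type (not Dawn's RefCounted or Ref):

    #include <cstdint>
    #include <iostream>

    // Toy refcounted object: starts at 1 on creation, like Dawn objects.
    struct Counted {
        uint64_t refcount = 1;
        void Reference() { refcount++; }
        void Release() {
            if (--refcount == 0) {
                std::cout << "destroyed\n";
                delete this;
            }
        }
    };

    int main() {
        // Ref<T> ref(ptr): the Ref constructor adds a reference on top of the creation one.
        Counted* a = new Counted();
        a->Reference();                    // what Ref<T>'s constructor does
        std::cout << a->refcount << "\n";  // 2: the old code needed a manual Release()
        a->Release();                      // the manual attachmentState->Release() this patch removes
        a->Release();                      // Ref destruction; prints "destroyed"

        // AcquireRef(ptr): construct the Ref, then immediately drop the creation reference.
        Counted* b = new Counted();
        b->Reference();                    // Ref<T> constructor inside AcquireRef
        b->Release();                      // the ref->Release() inside AcquireRef
        std::cout << b->refcount << "\n";  // 1: exactly one owner, no manual bookkeeping
        b->Release();                      // Ref destruction; prints "destroyed"
    }
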
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
index ca80aefc75d..aecaf30afa1 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
@@ -129,27 +129,15 @@ namespace dawn_native {
});
}
- void RenderEncoderBase::SetVertexBuffers(uint32_t startSlot,
- uint32_t count,
- BufferBase* const* buffers,
- uint64_t const* offsets) {
+ void RenderEncoderBase::SetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- for (size_t i = 0; i < count; ++i) {
- DAWN_TRY(GetDevice()->ValidateObject(buffers[i]));
- }
-
- SetVertexBuffersCmd* cmd =
- allocator->Allocate<SetVertexBuffersCmd>(Command::SetVertexBuffers);
- cmd->startSlot = startSlot;
- cmd->count = count;
-
- Ref<BufferBase>* cmdBuffers = allocator->AllocateData<Ref<BufferBase>>(count);
- for (size_t i = 0; i < count; ++i) {
- cmdBuffers[i] = buffers[i];
- }
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
- uint64_t* cmdOffsets = allocator->AllocateData<uint64_t>(count);
- memcpy(cmdOffsets, offsets, count * sizeof(uint64_t));
+ SetVertexBufferCmd* cmd =
+ allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
+ cmd->slot = slot;
+ cmd->buffer = buffer;
+ cmd->offset = offset;
return {};
});
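
SetVertexBuffers(startSlot, count, buffers, offsets) is replaced by one SetVertexBuffer(slot, buffer, offset) call per slot, which also removes the per-command arrays of Ref<BufferBase> and offsets from the command stream. A standalone sketch of how a multi-buffer call site migrates (the Fake* types are invented; only the shape of the new signature matches the patch):

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Minimal stand-ins so the call pattern can be shown without the real Dawn types.
    struct FakeBuffer {
        std::string label;
    };

    struct FakeRenderEncoder {
        // One slot per call, matching the new RenderEncoderBase::SetVertexBuffer signature.
        void SetVertexBuffer(uint32_t slot, FakeBuffer* buffer, uint64_t offset) {
            std::cout << "slot " << slot << " <- " << buffer->label << " @ " << offset << "\n";
        }
    };

    // What an old SetVertexBuffers(startSlot, count, buffers, offsets) call site becomes:
    // a loop in user code, or a small helper such as this one.
    void SetVertexBuffers(FakeRenderEncoder* encoder,
                          uint32_t startSlot,
                          uint32_t count,
                          FakeBuffer* const* buffers,
                          const uint64_t* offsets) {
        for (uint32_t i = 0; i < count; ++i) {
            encoder->SetVertexBuffer(startSlot + i, buffers[i], offsets[i]);
        }
    }

    int main() {
        FakeRenderEncoder encoder;
        FakeBuffer positions{"positions"};
        FakeBuffer normals{"normals"};
        FakeBuffer* buffers[] = {&positions, &normals};
        uint64_t offsets[] = {0, 256};
        SetVertexBuffers(&encoder, /*startSlot=*/0, /*count=*/2, buffers, offsets);
    }
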
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
index 19061bc8120..906c9e09af7 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
@@ -39,18 +39,7 @@ namespace dawn_native {
void SetPipeline(RenderPipelineBase* pipeline);
- template <typename T>
- void SetVertexBuffers(uint32_t startSlot,
- uint32_t count,
- T* const* buffers,
- uint64_t const* offsets) {
- static_assert(std::is_base_of<BufferBase, T>::value, "");
- SetVertexBuffers(startSlot, count, buffers, offsets);
- }
- void SetVertexBuffers(uint32_t startSlot,
- uint32_t count,
- BufferBase* const* buffers,
- uint64_t const* offsets);
+ void SetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset);
void SetIndexBuffer(BufferBase* buffer, uint64_t offset);
protected:
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
index 1550a26a46b..daba3a8ff2c 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
@@ -49,6 +49,10 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Setting attribute offset out of bounds");
}
+ if (attribute->offset % 4 != 0) {
+ return DAWN_VALIDATION_ERROR("Attribute offset needs to be a multiple of 4 bytes");
+ }
+
if ((*attributesSetMask)[attribute->shaderLocation]) {
return DAWN_VALIDATION_ERROR("Setting already set attribute");
}
@@ -67,7 +71,7 @@ namespace dawn_native {
if (buffer->stride % 4 != 0) {
return DAWN_VALIDATION_ERROR(
- "Stride of Vertex buffer needs to be multiple of 4 bytes");
+ "Stride of Vertex buffer needs to be a multiple of 4 bytes");
}
for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
@@ -117,23 +121,29 @@ namespace dawn_native {
}
MaybeError ValidateColorStateDescriptor(const DeviceBase* device,
- const ColorStateDescriptor* descriptor) {
- if (descriptor->nextInChain != nullptr) {
+ const ColorStateDescriptor& descriptor,
+ Format::Type fragmentOutputBaseType) {
+ if (descriptor.nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
- DAWN_TRY(ValidateBlendOperation(descriptor->alphaBlend.operation));
- DAWN_TRY(ValidateBlendFactor(descriptor->alphaBlend.srcFactor));
- DAWN_TRY(ValidateBlendFactor(descriptor->alphaBlend.dstFactor));
- DAWN_TRY(ValidateBlendOperation(descriptor->colorBlend.operation));
- DAWN_TRY(ValidateBlendFactor(descriptor->colorBlend.srcFactor));
- DAWN_TRY(ValidateBlendFactor(descriptor->colorBlend.dstFactor));
- DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
+ DAWN_TRY(ValidateBlendOperation(descriptor.alphaBlend.operation));
+ DAWN_TRY(ValidateBlendFactor(descriptor.alphaBlend.srcFactor));
+ DAWN_TRY(ValidateBlendFactor(descriptor.alphaBlend.dstFactor));
+ DAWN_TRY(ValidateBlendOperation(descriptor.colorBlend.operation));
+ DAWN_TRY(ValidateBlendFactor(descriptor.colorBlend.srcFactor));
+ DAWN_TRY(ValidateBlendFactor(descriptor.colorBlend.dstFactor));
+ DAWN_TRY(ValidateColorWriteMask(descriptor.writeMask));
const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor.format));
if (!format->IsColor() || !format->isRenderable) {
return DAWN_VALIDATION_ERROR("Color format must be color renderable");
}
+ if (fragmentOutputBaseType != Format::Type::Other &&
+ fragmentOutputBaseType != format->type) {
+ return DAWN_VALIDATION_ERROR(
+ "Color format must match the fragment stage output type");
+ }
return {};
}
@@ -283,10 +293,10 @@ namespace dawn_native {
}
DAWN_TRY(ValidatePrimitiveTopology(descriptor->primitiveTopology));
- DAWN_TRY(ValidatePipelineStageDescriptor(device, &descriptor->vertexStage,
- descriptor->layout, SingleShaderStage::Vertex));
- DAWN_TRY(ValidatePipelineStageDescriptor(device, descriptor->fragmentStage,
- descriptor->layout, SingleShaderStage::Fragment));
+ DAWN_TRY(ValidateProgrammableStageDescriptor(
+ device, &descriptor->vertexStage, descriptor->layout, SingleShaderStage::Vertex));
+ DAWN_TRY(ValidateProgrammableStageDescriptor(
+ device, descriptor->fragmentStage, descriptor->layout, SingleShaderStage::Fragment));
if (descriptor->rasterizationState) {
DAWN_TRY(ValidateRasterizationStateDescriptor(descriptor->rasterizationState));
@@ -310,8 +320,12 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Should have at least one attachment");
}
+ ASSERT(descriptor->fragmentStage != nullptr);
+ const ShaderModuleBase::FragmentOutputBaseTypes& fragmentOutputBaseTypes =
+ descriptor->fragmentStage->module->GetFragmentOutputBaseTypes();
for (uint32_t i = 0; i < descriptor->colorStateCount; ++i) {
- DAWN_TRY(ValidateColorStateDescriptor(device, descriptor->colorStates[i]));
+ DAWN_TRY(ValidateColorStateDescriptor(device, descriptor->colorStates[i],
+ fragmentOutputBaseTypes[i]));
}
if (descriptor->depthStencilState) {
@@ -420,7 +434,7 @@ namespace dawn_native {
}
for (uint32_t i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
- mColorStates[i] = *descriptor->colorStates[i];
+ mColorStates[i] = descriptor->colorStates[i];
}
// TODO(cwallez@chromium.org): Check against the shader module that the correct color
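
Render pipeline validation now also checks that each color attachment's base type matches what the fragment stage writes at that location (and that vertex attribute offsets are 4-byte aligned). A reduced standalone model of the type-matching check; the BaseType enum below is a stand-in for Dawn's Format::Type and shader output types:

    #include <iostream>
    #include <string>

    enum class BaseType { Other, Float, Uint, Sint };

    // Mirrors the new check: an attachment is rejected when the fragment output base type is
    // known and does not match the attachment format's base type.
    std::string ValidateColorStateTypes(BaseType fragmentOutputBaseType, BaseType attachmentFormatType) {
        if (fragmentOutputBaseType != BaseType::Other &&
            fragmentOutputBaseType != attachmentFormatType) {
            return "Color format must match the fragment stage output type";
        }
        return "";
    }

    int main() {
        std::cout << ValidateColorStateTypes(BaseType::Float, BaseType::Float) << "|ok\n";
        std::cout << ValidateColorStateTypes(BaseType::Uint, BaseType::Float) << "|rejected\n";
        // e.g. a shader writing floating-point output into a Uint attachment is now rejected.
    }
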
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h b/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h
index c4d67070329..6e6af795122 100644
--- a/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h
+++ b/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h
@@ -21,7 +21,7 @@ namespace dawn_native {
// Wrapper for a resource backed by a heap.
class ResourceHeapBase {
- protected:
+ public:
ResourceHeapBase() = default;
virtual ~ResourceHeapBase() = default;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp
index 1ace4d48fac..0d726c9d66c 100644
--- a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp
@@ -15,39 +15,39 @@
#include "dawn_native/ResourceMemoryAllocation.h"
#include "common/Assert.h"
-#include <limits>
-
namespace dawn_native {
- static constexpr uint64_t INVALID_OFFSET = std::numeric_limits<uint64_t>::max();
-
ResourceMemoryAllocation::ResourceMemoryAllocation()
- : mMethod(AllocationMethod::kInvalid), mOffset(INVALID_OFFSET), mResourceHeap(nullptr) {
+ : mOffset(0), mResourceHeap(nullptr), mMappedPointer(nullptr) {
}
- ResourceMemoryAllocation::ResourceMemoryAllocation(uint64_t offset,
+ ResourceMemoryAllocation::ResourceMemoryAllocation(const AllocationInfo& info,
+ uint64_t offset,
ResourceHeapBase* resourceHeap,
- AllocationMethod method)
- : mMethod(method), mOffset(offset), mResourceHeap(resourceHeap) {
+ uint8_t* mappedPointer)
+ : mInfo(info), mOffset(offset), mResourceHeap(resourceHeap), mMappedPointer(mappedPointer) {
}
ResourceHeapBase* ResourceMemoryAllocation::GetResourceHeap() const {
- ASSERT(mMethod != AllocationMethod::kInvalid);
+ ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
return mResourceHeap;
}
uint64_t ResourceMemoryAllocation::GetOffset() const {
- ASSERT(mMethod != AllocationMethod::kInvalid);
+ ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
return mOffset;
}
- AllocationMethod ResourceMemoryAllocation::GetAllocationMethod() const {
- ASSERT(mMethod != AllocationMethod::kInvalid);
- return mMethod;
+ AllocationInfo ResourceMemoryAllocation::GetInfo() const {
+ return mInfo;
+ }
+
+ uint8_t* ResourceMemoryAllocation::GetMappedPointer() const {
+ return mMappedPointer;
}
void ResourceMemoryAllocation::Invalidate() {
mResourceHeap = nullptr;
- mMethod = AllocationMethod::kInvalid;
+ mInfo = {};
}
} // namespace dawn_native \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
index 4e69a2290ef..12b42a3ee44 100644
--- a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
+++ b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
@@ -35,25 +35,39 @@ namespace dawn_native {
kInvalid
};
+ // Metadata that describes how the allocation was allocated.
+ struct AllocationInfo {
+ // AllocationInfo contains a separate offset to not confuse block vs memory offsets.
+ // The block offset is within the entire allocator memory range and only required by the
+ // buddy sub-allocator to get the corresponding memory. Unlike the block offset, the
+ // allocation offset is always local to the memory.
+ uint64_t mBlockOffset = 0;
+
+ AllocationMethod mMethod = AllocationMethod::kInvalid;
+ };
+
// Handle into a resource heap pool.
class ResourceMemoryAllocation {
public:
ResourceMemoryAllocation();
- ResourceMemoryAllocation(uint64_t offset,
+ ResourceMemoryAllocation(const AllocationInfo& info,
+ uint64_t offset,
ResourceHeapBase* resourceHeap,
- AllocationMethod method);
+ uint8_t* mappedPointer = nullptr);
~ResourceMemoryAllocation() = default;
ResourceHeapBase* GetResourceHeap() const;
uint64_t GetOffset() const;
- AllocationMethod GetAllocationMethod() const;
+ uint8_t* GetMappedPointer() const;
+ AllocationInfo GetInfo() const;
void Invalidate();
private:
- AllocationMethod mMethod;
+ AllocationInfo mInfo;
uint64_t mOffset;
ResourceHeapBase* mResourceHeap;
+ uint8_t* mMappedPointer;
};
} // namespace dawn_native
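
AllocationInfo keeps the buddy block offset separately from the allocation offset because the former spans the whole sub-allocator range while the latter is local to one heap. Purely as an illustration (this is an assumption about how a buddy sub-allocator with equally sized heaps could map one onto the other, not code from this patch):

    #include <cstdint>
    #include <iostream>

    struct HeapLocation {
        uint64_t heapIndex;
        uint64_t allocationOffset;  // local to the heap, i.e. what GetOffset() would return
    };

    // Hypothetical mapping: global block offset -> (heap index, heap-local offset).
    HeapLocation LocateBlock(uint64_t blockOffset, uint64_t heapSize) {
        return {blockOffset / heapSize, blockOffset % heapSize};
    }

    int main() {
        constexpr uint64_t kHeapSize = 4 * 1024 * 1024;  // made-up 4 MiB heaps
        HeapLocation loc = LocateBlock(/*blockOffset=*/5 * 1024 * 1024, kHeapSize);
        std::cout << "heap " << loc.heapIndex << ", offset " << loc.allocationOffset << "\n";
        // heap 1, offset 1048576: the block offset spans the whole allocator, while the
        // allocation offset stays local to one heap, which is why both are stored.
    }
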
diff --git a/chromium/third_party/dawn/src/dawn_native/RingBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp
index 90b92138bd2..6cb94b70489 100644
--- a/chromium/third_party/dawn/src/dawn_native/RingBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp
@@ -12,13 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn_native/RingBuffer.h"
-#include "dawn_native/Device.h"
+#include "dawn_native/RingBufferAllocator.h"
-#include <limits>
-
-// Note: Current RingBuffer implementation uses two indices (start and end) to implement a circular
-// queue. However, this approach defines a full queue when one element is still unused.
+// Note: Current RingBufferAllocator implementation uses two indices (start and end) to implement a
+// circular queue. However, this approach defines a full queue when one element is still unused.
//
// For example, [E,E,E,E] would be equivalent to [U,U,U,U].
// ^ ^
@@ -32,36 +29,10 @@
// TODO(bryan.bernhart@intel.com): Follow-up with ringbuffer optimization.
namespace dawn_native {
- static constexpr size_t INVALID_OFFSET = std::numeric_limits<size_t>::max();
-
- RingBuffer::RingBuffer(DeviceBase* device, size_t size) : mBufferSize(size), mDevice(device) {
- }
-
- MaybeError RingBuffer::Initialize() {
- DAWN_TRY_ASSIGN(mStagingBuffer, mDevice->CreateStagingBuffer(mBufferSize));
- DAWN_TRY(mStagingBuffer->Initialize());
- return {};
- }
-
- // Record allocations in a request when serial advances.
- // This method has been split from Tick() for testing.
- void RingBuffer::Track() {
- if (mCurrentRequestSize == 0)
- return;
- const Serial currentSerial = mDevice->GetPendingCommandSerial();
- if (mInflightRequests.Empty() || currentSerial > mInflightRequests.LastSerial()) {
- Request request;
- request.endOffset = mUsedEndOffset;
- request.size = mCurrentRequestSize;
-
- mInflightRequests.Enqueue(std::move(request), currentSerial);
- mCurrentRequestSize = 0; // reset
- }
+ RingBufferAllocator::RingBufferAllocator(size_t maxSize) : mMaxBlockSize(maxSize) {
}
- void RingBuffer::Tick(Serial lastCompletedSerial) {
- Track();
-
+ void RingBufferAllocator::Deallocate(Serial lastCompletedSerial) {
// Reclaim memory from previously recorded blocks.
for (Request& request : mInflightRequests.IterateUpTo(lastCompletedSerial)) {
mUsedStartOffset = request.endOffset;
@@ -72,23 +43,18 @@ namespace dawn_native {
mInflightRequests.ClearUpTo(lastCompletedSerial);
}
- size_t RingBuffer::GetSize() const {
- return mBufferSize;
+ size_t RingBufferAllocator::GetSize() const {
+ return mMaxBlockSize;
}
- size_t RingBuffer::GetUsedSize() const {
+ size_t RingBufferAllocator::GetUsedSize() const {
return mUsedSize;
}
- bool RingBuffer::Empty() const {
+ bool RingBufferAllocator::Empty() const {
return mInflightRequests.Empty();
}
- StagingBufferBase* RingBuffer::GetStagingBuffer() const {
- ASSERT(mStagingBuffer != nullptr);
- return mStagingBuffer.get();
- }
-
// Sub-allocate the ring-buffer by requesting a chunk of the specified size.
// This is a serial-based resource scheme; the life-span of resources (and the allocations) is
// tracked by GPU progress via serials. Memory can be reused by determining if the GPU has
@@ -96,55 +62,55 @@ namespace dawn_native {
// queue, which identifies an existing (or new) frames-worth of resources. Internally, the
// ring-buffer maintains offsets of 3 "memory" states: Free, Reclaimed, and Used. This is done
// in FIFO order as older frames would free resources before newer ones.
- UploadHandle RingBuffer::SubAllocate(size_t allocSize) {
- ASSERT(mStagingBuffer != nullptr);
-
+ size_t RingBufferAllocator::Allocate(size_t allocationSize, Serial serial) {
// Check if the buffer is full by comparing the used size.
// If the buffer is not split where waste occurs (e.g. cannot fit new sub-alloc in front), a
// subsequent sub-alloc could fail where the used size was previously adjusted to include
// the wasted space.
- if (mUsedSize >= mBufferSize)
- return UploadHandle{};
+ if (allocationSize == 0 || mUsedSize >= mMaxBlockSize) {
+ return kInvalidOffset;
+ }
- size_t startOffset = INVALID_OFFSET;
+ size_t startOffset = kInvalidOffset;
// Check if the buffer is NOT split (i.e. sub-alloc on ends)
if (mUsedStartOffset <= mUsedEndOffset) {
// Order is important (try to sub-alloc at end first).
// This is due to FIFO order where sub-allocs are inserted from left-to-right (when not
// wrapped).
- if (mUsedEndOffset + allocSize <= mBufferSize) {
+ if (mUsedEndOffset + allocationSize <= mMaxBlockSize) {
startOffset = mUsedEndOffset;
- mUsedEndOffset += allocSize;
- mUsedSize += allocSize;
- mCurrentRequestSize += allocSize;
- } else if (allocSize <= mUsedStartOffset) { // Try to sub-alloc at front.
- // Count the space at front in the request size so that a subsequent
+ mUsedEndOffset += allocationSize;
+ mUsedSize += allocationSize;
+ mCurrentRequestSize += allocationSize;
+ } else if (allocationSize <= mUsedStartOffset) { // Try to sub-alloc at front.
+ // Count the space at the end so that a subsequent
// sub-alloc cannot succeed when the buffer is full.
- const size_t requestSize = (mBufferSize - mUsedEndOffset) + allocSize;
+ const size_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
startOffset = 0;
- mUsedEndOffset = allocSize;
+ mUsedEndOffset = allocationSize;
mUsedSize += requestSize;
mCurrentRequestSize += requestSize;
}
- } else if (mUsedEndOffset + allocSize <=
+ } else if (mUsedEndOffset + allocationSize <=
mUsedStartOffset) { // Otherwise, buffer is split where sub-alloc must be
// in-between.
startOffset = mUsedEndOffset;
- mUsedEndOffset += allocSize;
- mUsedSize += allocSize;
- mCurrentRequestSize += allocSize;
+ mUsedEndOffset += allocationSize;
+ mUsedSize += allocationSize;
+ mCurrentRequestSize += allocationSize;
}
- if (startOffset == INVALID_OFFSET)
- return UploadHandle{};
+ if (startOffset != kInvalidOffset) {
+ Request request;
+ request.endOffset = mUsedEndOffset;
+ request.size = mCurrentRequestSize;
- UploadHandle uploadHandle;
- uploadHandle.mappedBuffer =
- static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + startOffset;
- uploadHandle.startOffset = startOffset;
+ mInflightRequests.Enqueue(std::move(request), serial);
+ mCurrentRequestSize = 0; // reset
+ }
- return uploadHandle;
+ return startOffset;
}
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RingBuffer.h b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
index dbc51bcea9f..60ee6395c45 100644
--- a/chromium/third_party/dawn/src/dawn_native/RingBuffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
@@ -12,46 +12,33 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef DAWNNATIVE_RINGBUFFER_H_
-#define DAWNNATIVE_RINGBUFFER_H_
+#ifndef DAWNNATIVE_RINGBUFFERALLOCATOR_H_
+#define DAWNNATIVE_RINGBUFFERALLOCATOR_H_
#include "common/SerialQueue.h"
-#include "dawn_native/StagingBuffer.h"
+#include <limits>
#include <memory>
-// RingBuffer is the front-end implementation used to manage a ring buffer in GPU memory.
+// RingBufferAllocator is the front-end implementation used to manage a ring buffer in GPU memory.
namespace dawn_native {
- struct UploadHandle {
- uint8_t* mappedBuffer = nullptr;
- size_t startOffset = 0;
- StagingBufferBase* stagingBuffer = nullptr;
- };
-
- class DeviceBase;
-
- class RingBuffer {
+ class RingBufferAllocator {
public:
- RingBuffer(DeviceBase* device, size_t size);
- ~RingBuffer() = default;
+ RingBufferAllocator() = default;
+ RingBufferAllocator(size_t maxSize);
+ ~RingBufferAllocator() = default;
- MaybeError Initialize();
+ size_t Allocate(size_t allocationSize, Serial serial);
+ void Deallocate(Serial lastCompletedSerial);
- UploadHandle SubAllocate(size_t requestedSize);
-
- void Tick(Serial lastCompletedSerial);
size_t GetSize() const;
bool Empty() const;
size_t GetUsedSize() const;
- StagingBufferBase* GetStagingBuffer() const;
- // Seperated for testing.
- void Track();
+ static constexpr size_t kInvalidOffset = std::numeric_limits<size_t>::max();
private:
- std::unique_ptr<StagingBufferBase> mStagingBuffer;
-
struct Request {
size_t endOffset;
size_t size;
@@ -62,13 +49,11 @@ namespace dawn_native {
size_t mUsedEndOffset = 0; // Tail of used sub-alloc requests (in bytes).
size_t mUsedStartOffset = 0; // Head of used sub-alloc requests (in bytes).
- size_t mBufferSize = 0; // Max size of the ring buffer (in bytes).
+ size_t mMaxBlockSize = 0; // Max size of the ring buffer (in bytes).
size_t mUsedSize = 0; // Size of the sub-alloc requests (in bytes) of the ring buffer.
size_t mCurrentRequestSize =
0; // Size of the sub-alloc requests (in bytes) of the current serial.
-
- DeviceBase* mDevice;
};
} // namespace dawn_native
-#endif // DAWNNATIVE_RINGBUFFER_H_
\ No newline at end of file
+#endif // DAWNNATIVE_RINGBUFFERALLOCATOR_H_
\ No newline at end of file
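
With the header above on the include path, intended usage follows the serial-based reclamation described in the .cpp comments: allocate against the serial of the commands that will consume the memory, then deallocate once that serial has completed. A hedged usage sketch; the staging-buffer wiring that normally surrounds this is omitted.

#include "dawn_native/RingBufferAllocator.h"

namespace dawn_native {

    void RingBufferAllocatorUsageSketch() {
        RingBufferAllocator allocator(4096);  // ring of 4096 bytes

        // Sub-allocate 256 bytes for commands that will be submitted with serial 1.
        const Serial pendingSerial = 1;
        size_t offset = allocator.Allocate(256, pendingSerial);
        if (offset == RingBufferAllocator::kInvalidOffset) {
            // The ring is full; the caller would fall back to a larger or extra ring.
            return;
        }

        // ... write the upload data into the backing staging memory at `offset` ...

        // Once the GPU has completed serial 1, the request is reclaimed and the
        // space becomes available to later Allocate() calls.
        allocator.Deallocate(/*lastCompletedSerial=*/1);
    }

}  // namespace dawn_native
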
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
index ccb6e32a7b5..f19a123b344 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
@@ -27,6 +27,22 @@
namespace dawn_native {
+ namespace {
+ Format::Type SpirvCrossBaseTypeToFormatType(spirv_cross::SPIRType::BaseType spirvBaseType) {
+ switch (spirvBaseType) {
+ case spirv_cross::SPIRType::Float:
+ return Format::Float;
+ case spirv_cross::SPIRType::Int:
+ return Format::Sint;
+ case spirv_cross::SPIRType::UInt:
+ return Format::Uint;
+ default:
+ UNREACHABLE();
+ return Format::Other;
+ }
+ }
+ } // anonymous namespace
+
MaybeError ValidateShaderModuleDescriptor(DeviceBase*,
const ShaderModuleDescriptor* descriptor) {
if (descriptor->nextInChain != nullptr) {
@@ -74,6 +90,7 @@ namespace dawn_native {
: ObjectBase(device),
mCode(descriptor->code, descriptor->code + descriptor->codeSize),
mIsBlueprint(blueprint) {
+ mFragmentOutputFormatBaseTypes.fill(Format::Other);
}
ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -190,6 +207,25 @@ namespace dawn_native {
return;
}
}
+
+ for (const auto& fragmentOutput : resources.stage_outputs) {
+ ASSERT(
+ compiler.get_decoration_bitset(fragmentOutput.id).get(spv::DecorationLocation));
+ uint32_t location =
+ compiler.get_decoration(fragmentOutput.id, spv::DecorationLocation);
+ if (location >= kMaxColorAttachments) {
+ device->HandleError(dawn::ErrorType::Validation,
+ "Fragment output location over limits in the SPIRV");
+ return;
+ }
+
+ spirv_cross::SPIRType::BaseType shaderFragmentOutputBaseType =
+ compiler.get_type(fragmentOutput.base_type_id).basetype;
+ Format::Type formatType =
+ SpirvCrossBaseTypeToFormatType(shaderFragmentOutputBaseType);
+ ASSERT(formatType != Format::Type::Other);
+ mFragmentOutputFormatBaseTypes[location] = formatType;
+ }
}
}
@@ -203,6 +239,12 @@ namespace dawn_native {
return mUsedVertexAttributes;
}
+ const ShaderModuleBase::FragmentOutputBaseTypes& ShaderModuleBase::GetFragmentOutputBaseTypes()
+ const {
+ ASSERT(!IsError());
+ return mFragmentOutputFormatBaseTypes;
+ }
+
SingleShaderStage ShaderModuleBase::GetExecutionModel() const {
ASSERT(!IsError());
return mExecutionModel;
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
index 35c90207c44..f2c133c8353 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
@@ -17,6 +17,7 @@
#include "common/Constants.h"
#include "dawn_native/Error.h"
+#include "dawn_native/Format.h"
#include "dawn_native/Forward.h"
#include "dawn_native/ObjectBase.h"
#include "dawn_native/PerStage.h"
@@ -61,6 +62,11 @@ namespace dawn_native {
const std::bitset<kMaxVertexAttributes>& GetUsedVertexAttributes() const;
SingleShaderStage GetExecutionModel() const;
+ // An array to record the basic types (float, int and uint) of the fragment shader outputs;
+ // Format::Type::Other means the fragment shader output is unused.
+ using FragmentOutputBaseTypes = std::array<Format::Type, kMaxColorAttachments>;
+ const FragmentOutputBaseTypes& GetFragmentOutputBaseTypes() const;
+
bool IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout);
// Functors necessary for the unordered_set<ShaderModuleBase*>-based cache.
@@ -84,6 +90,8 @@ namespace dawn_native {
ModuleBindingInfo mBindingInfo;
std::bitset<kMaxVertexAttributes> mUsedVertexAttributes;
SingleShaderStage mExecutionModel;
+
+ FragmentOutputBaseTypes mFragmentOutputFormatBaseTypes;
};
} // namespace dawn_native
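
The per-location base types recorded above are presumably compared against the color attachment formats when a render pipeline is validated; that check is not part of this hunk, so the following is only a hypothetical, self-contained sketch of such a comparison (ValidateFragmentOutputs, BaseType and the attachment limit are invented stand-ins).

#include <array>
#include <cstdint>

enum class BaseType { Other, Float, Sint, Uint };   // stand-in for Format::Type
constexpr uint32_t kMaxColorAttachments = 4;        // stand-in for the real constant

// Returns false if a fragment output that the shader actually writes does not
// match the base type of the attachment bound at the same location.
bool ValidateFragmentOutputs(const std::array<BaseType, kMaxColorAttachments>& shaderOutputs,
                             const std::array<BaseType, kMaxColorAttachments>& attachmentTypes) {
    for (uint32_t location = 0; location < kMaxColorAttachments; ++location) {
        if (shaderOutputs[location] == BaseType::Other) {
            continue;  // Output unused by the shader; nothing to check.
        }
        if (shaderOutputs[location] != attachmentTypes[location]) {
            return false;
        }
    }
    return true;
}
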
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
index d5f685a948d..8194fe15462 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
@@ -32,7 +32,7 @@ namespace dawn_native {
UNREACHABLE();
}
- void OnBeforePresent(TextureBase* texture) override {
+ MaybeError OnBeforePresent(TextureBase* texture) override {
UNREACHABLE();
}
};
@@ -127,7 +127,8 @@ namespace dawn_native {
}
ASSERT(!IsError());
- OnBeforePresent(texture);
+ if (GetDevice()->ConsumedError(OnBeforePresent(texture)))
+ return;
mImplementation.Present(mImplementation.userData);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.h b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
index 8b0e9fdb5ab..c9b65028796 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.h
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
@@ -47,7 +47,7 @@ namespace dawn_native {
const DawnSwapChainImplementation& GetImplementation();
virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
- virtual void OnBeforePresent(TextureBase* texture) = 0;
+ virtual MaybeError OnBeforePresent(TextureBase* texture) = 0;
private:
MaybeError ValidateConfigure(dawn::TextureFormat format,
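
OnBeforePresent now reports failure as a MaybeError, and Present() routes it through the device via ConsumedError instead of crashing. A small sketch of the same propagation pattern, assuming dawn_native/Error.h; DoBackendWork and OnBeforePresentSketch are invented names.

#include "dawn_native/Error.h"

namespace dawn_native {

    // Invented helper standing in for backend work that can fail.
    MaybeError DoBackendWork(bool ok) {
        if (!ok) {
            return DAWN_VALIDATION_ERROR("backend work failed");
        }
        return {};
    }

    MaybeError OnBeforePresentSketch() {
        // DAWN_TRY early-returns the error to the caller on failure.
        DAWN_TRY(DoBackendWork(true));
        return {};
    }

    // A caller then turns the MaybeError into a handled device error and bails out,
    // mirroring: if (GetDevice()->ConsumedError(OnBeforePresent(texture))) return;

}  // namespace dawn_native
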
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.cpp b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
index f6370a86832..bcaa923d914 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
@@ -235,6 +235,11 @@ namespace dawn_native {
DAWN_TRY(ValidateTextureFormat(descriptor->format));
+ DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
+ if (descriptor->aspect != dawn::TextureAspect::All) {
+ return DAWN_VALIDATION_ERROR("Texture aspect must be 'all'");
+ }
+
// TODO(jiawei.shao@intel.com): check stuff based on resource limits
if (descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0) {
return DAWN_VALIDATION_ERROR("Cannot create an empty texture view");
@@ -407,7 +412,8 @@ namespace dawn_native {
return true;
}
- void TextureBase::SetIsSubresourceContentInitialized(uint32_t baseMipLevel,
+ void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
+ uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
uint32_t layerCount) {
@@ -417,7 +423,7 @@ namespace dawn_native {
++arrayLayer) {
uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer);
ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
- mIsSubresourceContentInitializedAtIndex[subresourceIndex] = true;
+ mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
}
}
}
@@ -492,6 +498,7 @@ namespace dawn_native {
: ObjectBase(texture->GetDevice()),
mTexture(texture),
mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
+ mDimension(descriptor->dimension),
mBaseMipLevel(descriptor->baseMipLevel),
mMipLevelCount(descriptor->mipLevelCount),
mBaseArrayLayer(descriptor->baseArrayLayer),
@@ -522,6 +529,11 @@ namespace dawn_native {
return mFormat;
}
+ dawn::TextureViewDimension TextureViewBase::GetDimension() const {
+ ASSERT(!IsError());
+ return mDimension;
+ }
+
uint32_t TextureViewBase::GetBaseMipLevel() const {
ASSERT(!IsError());
return mBaseMipLevel;
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.h b/chromium/third_party/dawn/src/dawn_native/Texture.h
index 6e26f997c83..fafd46edcca 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.h
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.h
@@ -62,7 +62,8 @@ namespace dawn_native {
uint32_t levelCount,
uint32_t baseArrayLayer,
uint32_t layerCount) const;
- void SetIsSubresourceContentInitialized(uint32_t baseMipLevel,
+ void SetIsSubresourceContentInitialized(bool isInitialized,
+ uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
uint32_t layerCount);
@@ -115,6 +116,7 @@ namespace dawn_native {
TextureBase* GetTexture();
const Format& GetFormat() const;
+ dawn::TextureViewDimension GetDimension() const;
uint32_t GetBaseMipLevel() const;
uint32_t GetLevelCount() const;
uint32_t GetBaseArrayLayer() const;
@@ -127,6 +129,7 @@ namespace dawn_native {
// TODO(cwallez@chromium.org): This should be deduplicated in the Device
const Format& mFormat;
+ dawn::TextureViewDimension mDimension;
uint32_t mBaseMipLevel;
uint32_t mMipLevelCount;
uint32_t mBaseArrayLayer;
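
With the extra bool, the store-op handling above can both set and clear the per-subresource "content initialized" flags (StoreOp::Clear marks a subresource uninitialized again). A simplified standalone model of that bookkeeping; the flattening of (mip, layer) to an index is invented here, the real one lives on TextureBase.

#include <cstdint>
#include <vector>

class SubresourceInitTracker {
  public:
    SubresourceInitTracker(uint32_t mipLevels, uint32_t arrayLayers)
        : mMipLevels(mipLevels), mInitialized(mipLevels * arrayLayers, false) {
    }

    void SetIsSubresourceContentInitialized(bool isInitialized,
                                            uint32_t baseMipLevel,
                                            uint32_t levelCount,
                                            uint32_t baseArrayLayer,
                                            uint32_t layerCount) {
        for (uint32_t mip = baseMipLevel; mip < baseMipLevel + levelCount; ++mip) {
            for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount; ++layer) {
                mInitialized[GetSubresourceIndex(mip, layer)] = isInitialized;
            }
        }
    }

    bool IsInitialized(uint32_t mip, uint32_t layer) const {
        return mInitialized[GetSubresourceIndex(mip, layer)];
    }

  private:
    uint32_t GetSubresourceIndex(uint32_t mip, uint32_t layer) const {
        return layer * mMipLevels + mip;  // invented flattening
    }

    uint32_t mMipLevels;
    std::vector<bool> mInitialized;
};
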
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
index bbb69a328a7..7cc6f355c1d 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
@@ -57,6 +57,11 @@ namespace dawn_native {
"Clears resource to zero on first usage. This initializes the resource "
"so that no dirty bits from recycled memory is present in the new resource.",
"https://bugs.chromium.org/p/dawn/issues/detail?id=145"}},
+ {Toggle::TurnOffVsync,
+ {"turn_off_vsync",
+ "Turn off vsync when rendering. In order to do performance testing or run perf tests, "
+ "turn off vsync so that the fps can exceed 60.",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=237"}},
{Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
{"use_temporary_buffer_in_texture_to_texture_copy",
"Split texture-to-texture copy into two copies: copy from source texture into a "
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.h b/chromium/third_party/dawn/src/dawn_native/Toggles.h
index 73da01eacd5..d5265946dc1 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.h
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.h
@@ -28,6 +28,7 @@ namespace dawn_native {
NonzeroClearResourcesOnCreationForTesting,
AlwaysResolveIntoZeroLevelAndLayer,
LazyClearResourceOnFirstUse,
+ TurnOffVsync,
UseTemporaryBufferInCompressedTextureToTextureCopy,
EnumCount,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
index 275f1ce3faa..0d9292b3bc8 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
@@ -44,7 +44,7 @@ namespace dawn_native { namespace d3d12 {
for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
// It's not necessary to create descriptors in descriptor heap for dynamic resources.
// So skip allocating descriptors in descriptor heaps for dynamic buffers.
- if (layout.dynamic[bindingIndex]) {
+ if (layout.hasDynamicOffset[bindingIndex]) {
continue;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
index ec81ad6d0ca..e0627b4be30 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
@@ -27,7 +27,7 @@ namespace dawn_native { namespace d3d12 {
// For dynamic resources, Dawn uses root descriptor in D3D12 backend.
// So there is no need to allocate the descriptor from descriptor heap. Skip counting
// dynamic resources for calculating size of descriptor heap.
- if (groupInfo.dynamic[binding]) {
+ if (groupInfo.hasDynamicOffset[binding]) {
continue;
}
@@ -94,7 +94,7 @@ namespace dawn_native { namespace d3d12 {
descriptorOffsets[Sampler] = 0;
for (uint32_t binding : IterateBitSet(groupInfo.mask)) {
- if (groupInfo.dynamic[binding]) {
+ if (groupInfo.hasDynamicOffset[binding]) {
// Dawn is using values in mBindingOffsets to decide register number in HLSL.
// Root descriptor needs to set this value to set correct register number in
// generated HLSL shader.
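
The rename to hasDynamicOffset keeps the existing D3D12 behavior: bindings with dynamic offsets are bound as root descriptors, so they are skipped when sizing the descriptor heaps. A simplified standalone sketch of that counting; the bitset layout and limit are stand-ins for the real bind group layout members.

#include <bitset>
#include <cstdint>

constexpr uint32_t kMaxBindingsPerGroup = 16;  // illustrative limit

// Count the bindings that need descriptor-heap space: used bindings that do
// not take a dynamic offset (those become root descriptors instead).
uint32_t CountHeapDescriptors(const std::bitset<kMaxBindingsPerGroup>& mask,
                              const std::bitset<kMaxBindingsPerGroup>& hasDynamicOffset) {
    uint32_t count = 0;
    for (uint32_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
        if (mask[binding] && !hasDynamicOffset[binding]) {
            ++count;
        }
    }
    return count;
}
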
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
index 148a7408a35..a0a208305f3 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
@@ -17,8 +17,9 @@
#include "common/Assert.h"
#include "common/Constants.h"
#include "common/Math.h"
+#include "dawn_native/d3d12/CommandRecordingContext.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/ResourceHeapD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -125,13 +126,14 @@ namespace dawn_native { namespace d3d12 {
}
ComPtr<ID3D12Resource> Buffer::GetD3D12Resource() const {
- return ToBackend(mResourceAllocation.GetResourceHeap())->GetD3D12Resource();
+ return mResourceAllocation.GetD3D12Resource();
}
// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
// ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
// cause subsequent errors.
- bool Buffer::TransitionUsageAndGetResourceBarrier(D3D12_RESOURCE_BARRIER* barrier,
+ bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
dawn::BufferUsage newUsage) {
// Resources in upload and readback heaps must be kept in the COPY_SOURCE/DEST state
if (mFixedResourceState) {
@@ -188,17 +190,17 @@ namespace dawn_native { namespace d3d12 {
return true;
}
- void Buffer::TransitionUsageNow(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void Buffer::TransitionUsageNow(CommandRecordingContext* commandContext,
dawn::BufferUsage usage) {
D3D12_RESOURCE_BARRIER barrier;
- if (TransitionUsageAndGetResourceBarrier(&barrier, usage)) {
- commandList->ResourceBarrier(1, &barrier);
+ if (TransitionUsageAndGetResourceBarrier(commandContext, &barrier, usage)) {
+ commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
}
}
D3D12_GPU_VIRTUAL_ADDRESS Buffer::GetVA() const {
- return ToBackend(mResourceAllocation.GetResourceHeap())->GetGPUPointer();
+ return mResourceAllocation.GetGPUPointer();
}
void Buffer::OnMapCommandSerialFinished(uint32_t mapSerial, void* data, bool isWrite) {
@@ -216,8 +218,9 @@ namespace dawn_native { namespace d3d12 {
MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
mWrittenMappedRange = {0, GetSize()};
- ASSERT_SUCCESS(GetD3D12Resource()->Map(0, &mWrittenMappedRange,
- reinterpret_cast<void**>(mappedPointer)));
+ DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &mWrittenMappedRange,
+ reinterpret_cast<void**>(mappedPointer)),
+ "D3D12 map at creation"));
return {};
}
@@ -225,7 +228,9 @@ namespace dawn_native { namespace d3d12 {
mWrittenMappedRange = {};
D3D12_RANGE readRange = {0, GetSize()};
char* data = nullptr;
- ASSERT_SUCCESS(GetD3D12Resource()->Map(0, &readRange, reinterpret_cast<void**>(&data)));
+ DAWN_TRY(
+ CheckHRESULT(GetD3D12Resource()->Map(0, &readRange, reinterpret_cast<void**>(&data)),
+ "D3D12 map read async"));
// There is no need to transition the resource to a new state: D3D12 seems to make the GPU
// writes available when the fence is passed.
MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapRequestTracker();
@@ -236,8 +241,9 @@ namespace dawn_native { namespace d3d12 {
MaybeError Buffer::MapWriteAsyncImpl(uint32_t serial) {
mWrittenMappedRange = {0, GetSize()};
char* data = nullptr;
- ASSERT_SUCCESS(
- GetD3D12Resource()->Map(0, &mWrittenMappedRange, reinterpret_cast<void**>(&data)));
+ DAWN_TRY(CheckHRESULT(
+ GetD3D12Resource()->Map(0, &mWrittenMappedRange, reinterpret_cast<void**>(&data)),
+ "D3D12 map write async"));
// There is no need to transition the resource to a new state: D3D12 seems to make the CPU
// writes available on queue submission.
MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapRequestTracker();
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
index 7a9b433091d..289c4ecc8f1 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
@@ -18,11 +18,12 @@
#include "common/SerialQueue.h"
#include "dawn_native/Buffer.h"
-#include "dawn_native/ResourceMemoryAllocation.h"
+#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
+ class CommandRecordingContext;
class Device;
class Buffer : public BufferBase {
@@ -36,10 +37,10 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12Resource> GetD3D12Resource() const;
D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
void OnMapCommandSerialFinished(uint32_t mapSerial, void* data, bool isWrite);
- bool TransitionUsageAndGetResourceBarrier(D3D12_RESOURCE_BARRIER* barrier,
+ bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
dawn::BufferUsage newUsage);
- void TransitionUsageNow(ComPtr<ID3D12GraphicsCommandList> commandList,
- dawn::BufferUsage usage);
+ void TransitionUsageNow(CommandRecordingContext* commandContext, dawn::BufferUsage usage);
private:
// Dawn API
@@ -51,7 +52,7 @@ namespace dawn_native { namespace d3d12 {
bool IsMapWritable() const override;
virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
- ResourceMemoryAllocation mResourceAllocation;
+ ResourceHeapAllocation mResourceAllocation;
bool mFixedResourceState = false;
dawn::BufferUsage mLastUsage = dawn::BufferUsage::None;
Serial mLastUsedSerial = UINT64_MAX;
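
Splitting TransitionUsageAndGetResourceBarrier out of TransitionUsageNow lets callers collect several transitions and submit them in one ResourceBarrier call, as the pass-transition code later in this patch does. A hedged sketch of that batching shape against plain D3D12; FillTransitionBarrier is an invented helper standing in for the buffer/texture methods.

#include <d3d12.h>
#include <vector>

// Fills `barrier` and returns true only when a transition is actually needed.
bool FillTransitionBarrier(ID3D12Resource* resource,
                           D3D12_RESOURCE_STATES before,
                           D3D12_RESOURCE_STATES after,
                           D3D12_RESOURCE_BARRIER* barrier) {
    if (before == after) {
        return false;
    }
    barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
    barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
    barrier->Transition.pResource = resource;
    barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
    barrier->Transition.StateBefore = before;
    barrier->Transition.StateAfter = after;
    return true;
}

void TransitionBatchSketch(ID3D12GraphicsCommandList* commandList,
                           const std::vector<ID3D12Resource*>& resources,
                           D3D12_RESOURCE_STATES before,
                           D3D12_RESOURCE_STATES after) {
    std::vector<D3D12_RESOURCE_BARRIER> barriers;
    for (ID3D12Resource* resource : resources) {
        D3D12_RESOURCE_BARRIER barrier;
        if (FillTransitionBarrier(resource, before, after, &barrier)) {
            barriers.push_back(barrier);
        }
    }
    if (!barriers.empty()) {
        // One ResourceBarrier call for the whole batch.
        commandList->ResourceBarrier(static_cast<UINT>(barriers.size()), barriers.data());
    }
}
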
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.cpp
index 90f7a5a8951..8c6029e5c31 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/d3d12/CommandAllocatorManager.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "common/Assert.h"
@@ -26,12 +27,12 @@ namespace dawn_native { namespace d3d12 {
mFreeAllocators.set();
}
- ComPtr<ID3D12CommandAllocator> CommandAllocatorManager::ReserveCommandAllocator() {
+ ResultOrError<ID3D12CommandAllocator*> CommandAllocatorManager::ReserveCommandAllocator() {
// If there are no free allocators, get the oldest serial in flight and wait on it
if (mFreeAllocators.none()) {
const uint64_t firstSerial = mInFlightCommandAllocators.FirstSerial();
- device->WaitForSerial(firstSerial);
- Tick(firstSerial);
+ DAWN_TRY(device->WaitForSerial(firstSerial));
+ DAWN_TRY(Tick(firstSerial));
}
ASSERT(mFreeAllocators.any());
@@ -42,8 +43,10 @@ namespace dawn_native { namespace d3d12 {
if (firstFreeIndex >= mAllocatorCount) {
ASSERT(firstFreeIndex == mAllocatorCount);
mAllocatorCount++;
- ASSERT_SUCCESS(device->GetD3D12Device()->CreateCommandAllocator(
- D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(&mCommandAllocators[firstFreeIndex])));
+ DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateCommandAllocator(
+ D3D12_COMMAND_LIST_TYPE_DIRECT,
+ IID_PPV_ARGS(&mCommandAllocators[firstFreeIndex])),
+ "D3D12 create command allocator"));
}
// Mark the command allocator as used
@@ -53,17 +56,17 @@ namespace dawn_native { namespace d3d12 {
// ExecuteCommandLists
mInFlightCommandAllocators.Enqueue({mCommandAllocators[firstFreeIndex], firstFreeIndex},
device->GetPendingCommandSerial());
-
- return mCommandAllocators[firstFreeIndex];
+ return mCommandAllocators[firstFreeIndex].Get();
}
- void CommandAllocatorManager::Tick(uint64_t lastCompletedSerial) {
+ MaybeError CommandAllocatorManager::Tick(uint64_t lastCompletedSerial) {
// Reset all command allocators that are no longer in flight
for (auto it : mInFlightCommandAllocators.IterateUpTo(lastCompletedSerial)) {
- ASSERT_SUCCESS(it.commandAllocator->Reset());
+ DAWN_TRY(CheckHRESULT(it.commandAllocator->Reset(), "D3D12 reset command allocator"));
mFreeAllocators.set(it.index);
}
mInFlightCommandAllocators.ClearUpTo(lastCompletedSerial);
+ return {};
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.h
index fd7c8ce27dd..654d3be467c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.h
@@ -18,6 +18,7 @@
#include "dawn_native/d3d12/d3d12_platform.h"
#include "common/SerialQueue.h"
+#include "dawn_native/Error.h"
#include <bitset>
@@ -31,8 +32,8 @@ namespace dawn_native { namespace d3d12 {
// A CommandAllocator that is reserved must be used on the next ExecuteCommandLists
// otherwise its commands may be reset before execution has completed on the GPU
- ComPtr<ID3D12CommandAllocator> ReserveCommandAllocator();
- void Tick(uint64_t lastCompletedSerial);
+ ResultOrError<ID3D12CommandAllocator*> ReserveCommandAllocator();
+ MaybeError Tick(uint64_t lastCompletedSerial);
private:
Device* device;
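
ReserveCommandAllocator now returns a ResultOrError instead of asserting, so HRESULT failures from CreateCommandAllocator and Reset propagate as Dawn errors. A sketch of the calling pattern, assuming the headers below; ReserveAndTickSketch and its parameters are illustrative names.

#include "dawn_native/Error.h"
#include "dawn_native/d3d12/CommandAllocatorManager.h"

namespace dawn_native { namespace d3d12 {

    // DAWN_TRY_ASSIGN unwraps the ResultOrError<T> or early-returns the error;
    // DAWN_TRY does the same for MaybeError.
    MaybeError ReserveAndTickSketch(CommandAllocatorManager* manager,
                                    uint64_t lastCompletedSerial) {
        ID3D12CommandAllocator* allocator = nullptr;
        DAWN_TRY_ASSIGN(allocator, manager->ReserveCommandAllocator());

        // ... record and execute command lists that use `allocator` ...

        // Recycle allocators whose GPU work has completed.
        DAWN_TRY(manager->Tick(lastCompletedSerial));
        return {};
    }

}}  // namespace dawn_native::d3d12
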
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
index 7ea4e4095ab..d095395f60c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
@@ -15,12 +15,14 @@
#include "dawn_native/d3d12/CommandBufferD3D12.h"
#include "common/Assert.h"
+#include "dawn_native/BindGroupTracker.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/d3d12/BindGroupD3D12.h"
#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
#include "dawn_native/d3d12/BufferD3D12.h"
+#include "dawn_native/d3d12/CommandRecordingContext.h"
#include "dawn_native/d3d12/ComputePipelineD3D12.h"
#include "dawn_native/d3d12/DescriptorHeapAllocator.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
@@ -38,6 +40,7 @@
namespace dawn_native { namespace d3d12 {
namespace {
+
DXGI_FORMAT DXGIIndexFormat(dawn::IndexFormat format) {
switch (format) {
case dawn::IndexFormat::Uint16:
@@ -63,18 +66,24 @@ namespace dawn_native { namespace d3d12 {
return false;
}
+ struct OMSetRenderTargetArgs {
+ unsigned int numRTVs = 0;
+ std::array<D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments> RTVs = {};
+ D3D12_CPU_DESCRIPTOR_HANDLE dsv = {};
+ };
+
} // anonymous namespace
- class BindGroupStateTracker {
+ class BindGroupStateTracker : public BindGroupTrackerBase<BindGroup*, false> {
public:
- BindGroupStateTracker(Device* device) : mDevice(device) {
+ BindGroupStateTracker(Device* device) : BindGroupTrackerBase(), mDevice(device) {
}
void SetInComputePass(bool inCompute_) {
mInCompute = inCompute_;
}
- void AllocateDescriptorHeaps(Device* device) {
+ MaybeError AllocateDescriptorHeaps(Device* device) {
// This function should only be called once.
ASSERT(mCbvSrvUavGPUDescriptorHeap.Get() == nullptr &&
mSamplerGPUDescriptorHeap.Get() == nullptr);
@@ -82,18 +91,21 @@ namespace dawn_native { namespace d3d12 {
DescriptorHeapAllocator* descriptorHeapAllocator = device->GetDescriptorHeapAllocator();
if (mCbvSrvUavDescriptorHeapSize > 0) {
- mCbvSrvUavGPUDescriptorHeap = descriptorHeapAllocator->AllocateGPUHeap(
- D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, mCbvSrvUavDescriptorHeapSize);
+ DAWN_TRY_ASSIGN(
+ mCbvSrvUavGPUDescriptorHeap,
+ descriptorHeapAllocator->AllocateGPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV,
+ mCbvSrvUavDescriptorHeapSize));
}
if (mSamplerDescriptorHeapSize > 0) {
- mSamplerGPUDescriptorHeap = descriptorHeapAllocator->AllocateGPUHeap(
- D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, mSamplerDescriptorHeapSize);
+ DAWN_TRY_ASSIGN(mSamplerGPUDescriptorHeap, descriptorHeapAllocator->AllocateGPUHeap(
+ D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER,
+ mSamplerDescriptorHeapSize));
}
uint32_t cbvSrvUavDescriptorIndex = 0;
uint32_t samplerDescriptorIndex = 0;
- for (BindGroup* group : mBindGroupsList) {
+ for (BindGroup* group : mBindGroupsToAllocate) {
ASSERT(group);
ASSERT(cbvSrvUavDescriptorIndex +
ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount() <=
@@ -107,44 +119,58 @@ namespace dawn_native { namespace d3d12 {
ASSERT(cbvSrvUavDescriptorIndex == mCbvSrvUavDescriptorHeapSize);
ASSERT(samplerDescriptorIndex == mSamplerDescriptorHeapSize);
+
+ return {};
}
// This function must only be called before calling AllocateDescriptorHeaps().
void TrackSetBindGroup(BindGroup* group, uint32_t index, uint32_t indexInSubmit) {
if (mBindGroups[index] != group) {
mBindGroups[index] = group;
-
if (!group->TestAndSetCounted(mDevice->GetPendingCommandSerial(), indexInSubmit)) {
const BindGroupLayout* layout = ToBackend(group->GetLayout());
mCbvSrvUavDescriptorHeapSize += layout->GetCbvUavSrvDescriptorCount();
mSamplerDescriptorHeapSize += layout->GetSamplerDescriptorCount();
- mBindGroupsList.push_back(group);
+ mBindGroupsToAllocate.push_back(group);
}
}
}
- // This function must only be called before calling AllocateDescriptorHeaps().
- void TrackInheritedGroups(PipelineLayout* oldLayout,
- PipelineLayout* newLayout,
- uint32_t indexInSubmit) {
- if (oldLayout == nullptr) {
- return;
+ void Apply(ID3D12GraphicsCommandList* commandList) {
+ for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, mBindGroups[index],
+ mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
+ }
+ DidApply();
+ }
+
+ void Reset() {
+ for (uint32_t i = 0; i < kMaxBindGroups; ++i) {
+ mBindGroups[i] = nullptr;
}
+ }
- uint32_t inheritUntil = oldLayout->GroupsInheritUpTo(newLayout);
- for (uint32_t i = 0; i < inheritUntil; ++i) {
- TrackSetBindGroup(mBindGroups[i], i, indexInSubmit);
+ void SetID3D12DescriptorHeaps(ComPtr<ID3D12GraphicsCommandList> commandList) {
+ ASSERT(commandList != nullptr);
+ ID3D12DescriptorHeap* descriptorHeaps[2] = {mCbvSrvUavGPUDescriptorHeap.Get(),
+ mSamplerGPUDescriptorHeap.Get()};
+ if (descriptorHeaps[0] && descriptorHeaps[1]) {
+ commandList->SetDescriptorHeaps(2, descriptorHeaps);
+ } else if (descriptorHeaps[0]) {
+ commandList->SetDescriptorHeaps(1, descriptorHeaps);
+ } else if (descriptorHeaps[1]) {
+ commandList->SetDescriptorHeaps(1, &descriptorHeaps[1]);
}
}
- void SetBindGroup(ComPtr<ID3D12GraphicsCommandList> commandList,
- PipelineLayout* pipelineLayout,
- BindGroup* group,
- uint32_t index,
- uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets,
- bool force = false) {
+ private:
+ void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
+ PipelineLayout* pipelineLayout,
+ uint32_t index,
+ BindGroup* group,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets) {
// Usually, the application won't set the same offsets many times,
// so always try to apply dynamic offsets even if the offsets stay the same
if (dynamicOffsetCount) {
@@ -153,7 +179,7 @@ namespace dawn_native { namespace d3d12 {
group->GetLayout()->GetBindingInfo();
uint32_t currentDynamicBufferIndex = 0;
- for (uint32_t bindingIndex : IterateBitSet(layout.dynamic)) {
+ for (uint32_t bindingIndex : IterateBitSet(layout.hasDynamicOffset)) {
ASSERT(dynamicOffsetCount > 0);
uint32_t parameterIndex =
pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
@@ -193,96 +219,50 @@ namespace dawn_native { namespace d3d12 {
break;
}
- // Record current dynamic offsets for inheriting
- mLastDynamicOffsets[index][currentDynamicBufferIndex] = dynamicOffset;
++currentDynamicBufferIndex;
}
}
- if (mBindGroups[index] != group || force) {
- mBindGroups[index] = group;
- uint32_t cbvUavSrvCount =
- ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
- uint32_t samplerCount = ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
-
- if (cbvUavSrvCount > 0) {
- uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
-
- if (mInCompute) {
- commandList->SetComputeRootDescriptorTable(
- parameterIndex, mCbvSrvUavGPUDescriptorHeap.GetGPUHandle(
- group->GetCbvUavSrvHeapOffset()));
- } else {
- commandList->SetGraphicsRootDescriptorTable(
- parameterIndex, mCbvSrvUavGPUDescriptorHeap.GetGPUHandle(
- group->GetCbvUavSrvHeapOffset()));
- }
- }
-
- if (samplerCount > 0) {
- uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
-
- if (mInCompute) {
- commandList->SetComputeRootDescriptorTable(
- parameterIndex,
- mSamplerGPUDescriptorHeap.GetGPUHandle(group->GetSamplerHeapOffset()));
- } else {
- commandList->SetGraphicsRootDescriptorTable(
- parameterIndex,
- mSamplerGPUDescriptorHeap.GetGPUHandle(group->GetSamplerHeapOffset()));
- }
- }
- }
- }
-
- void SetInheritedBindGroups(ComPtr<ID3D12GraphicsCommandList> commandList,
- PipelineLayout* oldLayout,
- PipelineLayout* newLayout) {
- if (oldLayout == nullptr) {
+ // It's not necessary to update descriptor tables if only the dynamic offset changed.
+ if (!mDirtyBindGroups[index]) {
return;
}
- uint32_t inheritUntil = oldLayout->GroupsInheritUpTo(newLayout);
- for (uint32_t i = 0; i < inheritUntil; ++i) {
- const BindGroupLayout* layout = ToBackend(mBindGroups[i]->GetLayout());
- const uint32_t dynamicBufferCount = layout->GetDynamicBufferCount();
+ uint32_t cbvUavSrvCount = ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
+ uint32_t samplerCount = ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
+
+ if (cbvUavSrvCount > 0) {
+ uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
- // Inherit dynamic offsets
- if (dynamicBufferCount > 0) {
- SetBindGroup(commandList, newLayout, mBindGroups[i], i, dynamicBufferCount,
- mLastDynamicOffsets[i].data(), true);
+ if (mInCompute) {
+ commandList->SetComputeRootDescriptorTable(
+ parameterIndex,
+ mCbvSrvUavGPUDescriptorHeap.GetGPUHandle(group->GetCbvUavSrvHeapOffset()));
} else {
- SetBindGroup(commandList, newLayout, mBindGroups[i], i, 0, nullptr, true);
+ commandList->SetGraphicsRootDescriptorTable(
+ parameterIndex,
+ mCbvSrvUavGPUDescriptorHeap.GetGPUHandle(group->GetCbvUavSrvHeapOffset()));
}
}
- }
- void Reset() {
- for (uint32_t i = 0; i < kMaxBindGroups; ++i) {
- mBindGroups[i] = nullptr;
- }
- }
+ if (samplerCount > 0) {
+ uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
- void SetID3D12DescriptorHeaps(ComPtr<ID3D12GraphicsCommandList> commandList) {
- ASSERT(commandList != nullptr);
- ID3D12DescriptorHeap* descriptorHeaps[2] = {mCbvSrvUavGPUDescriptorHeap.Get(),
- mSamplerGPUDescriptorHeap.Get()};
- if (descriptorHeaps[0] && descriptorHeaps[1]) {
- commandList->SetDescriptorHeaps(2, descriptorHeaps);
- } else if (descriptorHeaps[0]) {
- commandList->SetDescriptorHeaps(1, descriptorHeaps);
- } else if (descriptorHeaps[1]) {
- commandList->SetDescriptorHeaps(1, &descriptorHeaps[1]);
+ if (mInCompute) {
+ commandList->SetComputeRootDescriptorTable(
+ parameterIndex,
+ mSamplerGPUDescriptorHeap.GetGPUHandle(group->GetSamplerHeapOffset()));
+ } else {
+ commandList->SetGraphicsRootDescriptorTable(
+ parameterIndex,
+ mSamplerGPUDescriptorHeap.GetGPUHandle(group->GetSamplerHeapOffset()));
+ }
}
}
- private:
uint32_t mCbvSrvUavDescriptorHeapSize = 0;
uint32_t mSamplerDescriptorHeapSize = 0;
- std::array<BindGroup*, kMaxBindGroups> mBindGroups = {};
- std::deque<BindGroup*> mBindGroupsList = {};
- std::array<std::array<uint64_t, kMaxDynamicBufferCount>, kMaxBindGroups>
- mLastDynamicOffsets;
+ std::deque<BindGroup*> mBindGroupsToAllocate = {};
bool mInCompute = false;
DescriptorHeapHandle mCbvSrvUavGPUDescriptorHeap = {};
@@ -291,12 +271,6 @@ namespace dawn_native { namespace d3d12 {
Device* mDevice;
};
- struct OMSetRenderTargetArgs {
- unsigned int numRTVs = 0;
- std::array<D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments> RTVs = {};
- D3D12_CPU_DESCRIPTOR_HANDLE dsv = {};
- };
-
class RenderPassDescriptorHeapTracker {
public:
RenderPassDescriptorHeapTracker(Device* device) : mDevice(device) {
@@ -313,20 +287,23 @@ namespace dawn_native { namespace d3d12 {
}
}
- void AllocateRTVAndDSVHeaps() {
+ MaybeError AllocateRTVAndDSVHeaps() {
// This function should only be called once.
DAWN_ASSERT(mRTVHeap.Get() == nullptr && mDSVHeap.Get() == nullptr);
DescriptorHeapAllocator* allocator = mDevice->GetDescriptorHeapAllocator();
if (mNumRTVs > 0) {
- mRTVHeap = allocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_RTV, mNumRTVs);
+ DAWN_TRY_ASSIGN(
+ mRTVHeap, allocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_RTV, mNumRTVs));
}
if (mNumDSVs > 0) {
- mDSVHeap = allocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_DSV, mNumDSVs);
+ DAWN_TRY_ASSIGN(
+ mDSVHeap, allocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_DSV, mNumDSVs));
}
+ return {};
}
- // TODO(jiawei.shao@intel.com): use hash map <RenderPass, OMSetRenderTargetArgs> as cache to
- // avoid redundant RTV and DSV memory allocations.
+ // TODO(jiawei.shao@intel.com): use hash map <RenderPass, OMSetRenderTargetArgs> as
+ // cache to avoid redundant RTV and DSV memory allocations.
OMSetRenderTargetArgs GetSubpassOMSetRenderTargetArgs(BeginRenderPassCmd* renderPass) {
OMSetRenderTargetArgs args = {};
@@ -380,33 +357,107 @@ namespace dawn_native { namespace d3d12 {
namespace {
- void AllocateAndSetDescriptorHeaps(Device* device,
- BindGroupStateTracker* bindingTracker,
- RenderPassDescriptorHeapTracker* renderPassTracker,
- CommandIterator* commands,
- uint32_t indexInSubmit) {
+ class VertexBufferTracker {
+ public:
+ void OnSetVertexBuffer(uint32_t slot, Buffer* buffer, uint64_t offset) {
+ mStartSlot = std::min(mStartSlot, slot);
+ mEndSlot = std::max(mEndSlot, slot + 1);
+
+ auto* d3d12BufferView = &mD3D12BufferViews[slot];
+ d3d12BufferView->BufferLocation = buffer->GetVA() + offset;
+ d3d12BufferView->SizeInBytes = buffer->GetSize() - offset;
+ // The bufferView stride is set based on the input state before a draw.
+ }
+
+ void Apply(ID3D12GraphicsCommandList* commandList,
+ const RenderPipeline* renderPipeline) {
+ ASSERT(renderPipeline != nullptr);
+
+ std::bitset<kMaxVertexBuffers> inputsMask = renderPipeline->GetInputsSetMask();
+
+ uint32_t startSlot = mStartSlot;
+ uint32_t endSlot = mEndSlot;
+
+ // If the input state has changed, we need to update the StrideInBytes
+ // for the D3D12 buffer views. We also need to extend the dirty range to
+ // touch all these slots because the stride may have changed.
+ if (mLastAppliedRenderPipeline != renderPipeline) {
+ mLastAppliedRenderPipeline = renderPipeline;
+
+ for (uint32_t slot : IterateBitSet(inputsMask)) {
+ startSlot = std::min(startSlot, slot);
+ endSlot = std::max(endSlot, slot + 1);
+ mD3D12BufferViews[slot].StrideInBytes =
+ renderPipeline->GetInput(slot).stride;
+ }
+ }
+
+ if (endSlot <= startSlot) {
+ return;
+ }
+
+ // mD3D12BufferViews is kept up to date with the most recent data passed
+ // to SetVertexBuffer. This makes it correct to only track the start
+ // and end of the dirty range. When Apply is called,
+ // we will at worst set non-dirty vertex buffers in duplicate.
+ uint32_t count = endSlot - startSlot;
+ commandList->IASetVertexBuffers(startSlot, count, &mD3D12BufferViews[startSlot]);
+
+ mStartSlot = kMaxVertexBuffers;
+ mEndSlot = 0;
+ }
+
+ private:
+ // startSlot and endSlot indicate the range of dirty vertex buffers.
+ // If there are multiple calls to SetVertexBuffer, the start and end
+ // represent the union of the dirty ranges (the union may have non-dirty
+ // data in the middle of the range).
+ const RenderPipeline* mLastAppliedRenderPipeline = nullptr;
+ uint32_t mStartSlot = kMaxVertexBuffers;
+ uint32_t mEndSlot = 0;
+ std::array<D3D12_VERTEX_BUFFER_VIEW, kMaxVertexBuffers> mD3D12BufferViews = {};
+ };
+
+ class IndexBufferTracker {
+ public:
+ void OnSetIndexBuffer(Buffer* buffer, uint64_t offset) {
+ mD3D12BufferView.BufferLocation = buffer->GetVA() + offset;
+ mD3D12BufferView.SizeInBytes = buffer->GetSize() - offset;
+
+ // We don't need to dirty the state unless BufferLocation or SizeInBytes
+ // change, but most of the time this will always be the case.
+ mLastAppliedIndexFormat = DXGI_FORMAT_UNKNOWN;
+ }
+
+ void OnSetPipeline(const RenderPipelineBase* pipeline) {
+ mD3D12BufferView.Format =
+ DXGIIndexFormat(pipeline->GetVertexInputDescriptor()->indexFormat);
+ }
+
+ void Apply(ID3D12GraphicsCommandList* commandList) {
+ if (mD3D12BufferView.Format == mLastAppliedIndexFormat) {
+ return;
+ }
+
+ commandList->IASetIndexBuffer(&mD3D12BufferView);
+ mLastAppliedIndexFormat = mD3D12BufferView.Format;
+ }
+
+ private:
+ DXGI_FORMAT mLastAppliedIndexFormat = DXGI_FORMAT_UNKNOWN;
+ D3D12_INDEX_BUFFER_VIEW mD3D12BufferView = {};
+ };
+
+ MaybeError AllocateAndSetDescriptorHeaps(Device* device,
+ BindGroupStateTracker* bindingTracker,
+ RenderPassDescriptorHeapTracker* renderPassTracker,
+ CommandIterator* commands,
+ uint32_t indexInSubmit) {
{
Command type;
- PipelineLayout* lastLayout = nullptr;
auto HandleCommand = [&](CommandIterator* commands, Command type) {
switch (type) {
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd =
- commands->NextCommand<SetComputePipelineCmd>();
- PipelineLayout* layout = ToBackend(cmd->pipeline->GetLayout());
- bindingTracker->TrackInheritedGroups(lastLayout, layout, indexInSubmit);
- lastLayout = layout;
- } break;
-
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd =
- commands->NextCommand<SetRenderPipelineCmd>();
- PipelineLayout* layout = ToBackend(cmd->pipeline->GetLayout());
- bindingTracker->TrackInheritedGroups(lastLayout, layout, indexInSubmit);
- lastLayout = layout;
- } break;
-
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
BindGroup* group = ToBackend(cmd->group.Get());
@@ -447,11 +498,12 @@ namespace dawn_native { namespace d3d12 {
commands->Reset();
}
- renderPassTracker->AllocateRTVAndDSVHeaps();
- bindingTracker->AllocateDescriptorHeaps(device);
+ DAWN_TRY(renderPassTracker->AllocateRTVAndDSVHeaps());
+ DAWN_TRY(bindingTracker->AllocateDescriptorHeaps(device));
+ return {};
}
- void ResolveMultisampledRenderPass(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void ResolveMultisampledRenderPass(CommandRecordingContext* commandContext,
BeginRenderPassCmd* renderPass) {
ASSERT(renderPass != nullptr);
@@ -468,8 +520,10 @@ namespace dawn_native { namespace d3d12 {
Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
// Transition the usages of the color attachment and resolve target.
- colorTexture->TransitionUsageNow(commandList, D3D12_RESOURCE_STATE_RESOLVE_SOURCE);
- resolveTexture->TransitionUsageNow(commandList, D3D12_RESOURCE_STATE_RESOLVE_DEST);
+ colorTexture->TransitionUsageNow(commandContext,
+ D3D12_RESOURCE_STATE_RESOLVE_SOURCE);
+ resolveTexture->TransitionUsageNow(commandContext,
+ D3D12_RESOURCE_STATE_RESOLVE_DEST);
// Do MSAA resolve with ResolveSubResource().
ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
@@ -477,7 +531,7 @@ namespace dawn_native { namespace d3d12 {
const uint32_t resolveTextureSubresourceIndex = resolveTexture->GetSubresourceIndex(
resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer());
constexpr uint32_t kColorTextureSubresourceIndex = 0;
- commandList->ResolveSubresource(
+ commandContext->GetCommandList()->ResolveSubresource(
resolveTextureHandle, resolveTextureSubresourceIndex, colorTextureHandle,
kColorTextureSubresourceIndex, colorTexture->GetD3D12Format());
}
@@ -494,32 +548,37 @@ namespace dawn_native { namespace d3d12 {
FreeCommands(&mCommands);
}
- void CommandBuffer::RecordCommands(ComPtr<ID3D12GraphicsCommandList> commandList,
- uint32_t indexInSubmit) {
+ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext,
+ uint32_t indexInSubmit) {
Device* device = ToBackend(GetDevice());
BindGroupStateTracker bindingTracker(device);
RenderPassDescriptorHeapTracker renderPassTracker(device);
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
// Precompute the allocation of bindgroups in descriptor heaps
// TODO(cwallez@chromium.org): Iterating over all the commands here is inefficient. We
// should have a system where commands and descriptors are recorded in parallel then the
// heaps set using a small CommandList inserted just before the main CommandList.
{
- AllocateAndSetDescriptorHeaps(device, &bindingTracker, &renderPassTracker, &mCommands,
- indexInSubmit);
+ DAWN_TRY(AllocateAndSetDescriptorHeaps(device, &bindingTracker, &renderPassTracker,
+ &mCommands, indexInSubmit));
bindingTracker.Reset();
bindingTracker.SetID3D12DescriptorHeaps(commandList);
}
// Records the necessary barriers for the resource usage pre-computed by the frontend
- auto TransitionForPass = [](ComPtr<ID3D12GraphicsCommandList> commandList,
+ auto TransitionForPass = [](CommandRecordingContext* commandContext,
const PassResourceUsage& usages) {
std::vector<D3D12_RESOURCE_BARRIER> barriers;
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
for (size_t i = 0; i < usages.buffers.size(); ++i) {
D3D12_RESOURCE_BARRIER barrier;
if (ToBackend(usages.buffers[i])
- ->TransitionUsageAndGetResourceBarrier(&barrier, usages.bufferUsages[i])) {
+ ->TransitionUsageAndGetResourceBarrier(commandContext, &barrier,
+ usages.bufferUsages[i])) {
barriers.push_back(barrier);
}
}
@@ -530,15 +589,17 @@ namespace dawn_native { namespace d3d12 {
// cleared during record render pass if the texture subresource has not been
// initialized before the render pass.
if (!(usages.textureUsages[i] & dawn::TextureUsage::OutputAttachment)) {
- texture->EnsureSubresourceContentInitialized(
- commandList, 0, texture->GetNumMipLevels(), 0, texture->GetArrayLayers());
+ texture->EnsureSubresourceContentInitialized(commandContext, 0,
+ texture->GetNumMipLevels(), 0,
+ texture->GetArrayLayers());
}
}
for (size_t i = 0; i < usages.textures.size(); ++i) {
D3D12_RESOURCE_BARRIER barrier;
if (ToBackend(usages.textures[i])
- ->TransitionUsageAndGetResourceBarrier(&barrier, usages.textureUsages[i])) {
+ ->TransitionUsageAndGetResourceBarrier(commandContext, &barrier,
+ usages.textureUsages[i])) {
barriers.push_back(barrier);
}
}
@@ -557,7 +618,7 @@ namespace dawn_native { namespace d3d12 {
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
- TransitionForPass(commandList, passResourceUsages[nextPassNumber]);
+ TransitionForPass(commandContext, passResourceUsages[nextPassNumber]);
bindingTracker.SetInComputePass(true);
RecordComputePass(commandList, &bindingTracker);
@@ -568,9 +629,9 @@ namespace dawn_native { namespace d3d12 {
BeginRenderPassCmd* beginRenderPassCmd =
mCommands.NextCommand<BeginRenderPassCmd>();
- TransitionForPass(commandList, passResourceUsages[nextPassNumber]);
+ TransitionForPass(commandContext, passResourceUsages[nextPassNumber]);
bindingTracker.SetInComputePass(false);
- RecordRenderPass(commandList, &bindingTracker, &renderPassTracker,
+ RecordRenderPass(commandContext, &bindingTracker, &renderPassTracker,
beginRenderPassCmd);
nextPassNumber++;
@@ -581,8 +642,8 @@ namespace dawn_native { namespace d3d12 {
Buffer* srcBuffer = ToBackend(copy->source.Get());
Buffer* dstBuffer = ToBackend(copy->destination.Get());
- srcBuffer->TransitionUsageNow(commandList, dawn::BufferUsage::CopySrc);
- dstBuffer->TransitionUsageNow(commandList, dawn::BufferUsage::CopyDst);
+ srcBuffer->TransitionUsageNow(commandContext, dawn::BufferUsage::CopySrc);
+ dstBuffer->TransitionUsageNow(commandContext, dawn::BufferUsage::CopyDst);
commandList->CopyBufferRegion(
dstBuffer->GetD3D12Resource().Get(), copy->destinationOffset,
@@ -597,15 +658,15 @@ namespace dawn_native { namespace d3d12 {
if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
copy->destination.mipLevel)) {
texture->SetIsSubresourceContentInitialized(
- copy->destination.mipLevel, 1, copy->destination.arrayLayer, 1);
+ true, copy->destination.mipLevel, 1, copy->destination.arrayLayer, 1);
} else {
texture->EnsureSubresourceContentInitialized(
- commandList, copy->destination.mipLevel, 1,
+ commandContext, copy->destination.mipLevel, 1,
copy->destination.arrayLayer, 1);
}
- buffer->TransitionUsageNow(commandList, dawn::BufferUsage::CopySrc);
- texture->TransitionUsageNow(commandList, dawn::TextureUsage::CopyDst);
+ buffer->TransitionUsageNow(commandContext, dawn::BufferUsage::CopySrc);
+ texture->TransitionUsageNow(commandContext, dawn::TextureUsage::CopyDst);
auto copySplit = ComputeTextureCopySplit(
copy->destination.origin, copy->copySize, texture->GetFormat(),
@@ -636,11 +697,11 @@ namespace dawn_native { namespace d3d12 {
Texture* texture = ToBackend(copy->source.texture.Get());
Buffer* buffer = ToBackend(copy->destination.buffer.Get());
- texture->EnsureSubresourceContentInitialized(commandList, copy->source.mipLevel,
- 1, copy->source.arrayLayer, 1);
+ texture->EnsureSubresourceContentInitialized(
+ commandContext, copy->source.mipLevel, 1, copy->source.arrayLayer, 1);
- texture->TransitionUsageNow(commandList, dawn::TextureUsage::CopySrc);
- buffer->TransitionUsageNow(commandList, dawn::BufferUsage::CopyDst);
+ texture->TransitionUsageNow(commandContext, dawn::TextureUsage::CopySrc);
+ buffer->TransitionUsageNow(commandContext, dawn::BufferUsage::CopyDst);
TextureCopySplit copySplit = ComputeTextureCopySplit(
copy->source.origin, copy->copySize, texture->GetFormat(),
@@ -675,19 +736,19 @@ namespace dawn_native { namespace d3d12 {
Texture* source = ToBackend(copy->source.texture.Get());
Texture* destination = ToBackend(copy->destination.texture.Get());
- source->EnsureSubresourceContentInitialized(commandList, copy->source.mipLevel,
- 1, copy->source.arrayLayer, 1);
+ source->EnsureSubresourceContentInitialized(
+ commandContext, copy->source.mipLevel, 1, copy->source.arrayLayer, 1);
if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
copy->destination.mipLevel)) {
destination->SetIsSubresourceContentInitialized(
- copy->destination.mipLevel, 1, copy->destination.arrayLayer, 1);
+ true, copy->destination.mipLevel, 1, copy->destination.arrayLayer, 1);
} else {
destination->EnsureSubresourceContentInitialized(
- commandList, copy->destination.mipLevel, 1,
+ commandContext, copy->destination.mipLevel, 1,
copy->destination.arrayLayer, 1);
}
- source->TransitionUsageNow(commandList, dawn::TextureUsage::CopySrc);
- destination->TransitionUsageNow(commandList, dawn::TextureUsage::CopyDst);
+ source->TransitionUsageNow(commandContext, dawn::TextureUsage::CopySrc);
+ destination->TransitionUsageNow(commandContext, dawn::TextureUsage::CopyDst);
if (CanUseCopyResource(source->GetNumMipLevels(), source->GetSize(),
destination->GetSize(), copy->copySize)) {
@@ -717,50 +778,10 @@ namespace dawn_native { namespace d3d12 {
}
DAWN_ASSERT(renderPassTracker.IsHeapAllocationCompleted());
+ return {};
}
- void CommandBuffer::FlushSetVertexBuffers(ComPtr<ID3D12GraphicsCommandList> commandList,
- VertexBuffersInfo* vertexBuffersInfo,
- const RenderPipeline* renderPipeline) {
- DAWN_ASSERT(vertexBuffersInfo != nullptr);
- DAWN_ASSERT(renderPipeline != nullptr);
-
- auto inputsMask = renderPipeline->GetInputsSetMask();
-
- uint32_t startSlot = vertexBuffersInfo->startSlot;
- uint32_t endSlot = vertexBuffersInfo->endSlot;
-
- // If the input state has changed, we need to update the StrideInBytes
- // for the D3D12 buffer views. We also need to extend the dirty range to
- // touch all these slots because the stride may have changed.
- if (vertexBuffersInfo->lastRenderPipeline != renderPipeline) {
- vertexBuffersInfo->lastRenderPipeline = renderPipeline;
-
- for (uint32_t slot : IterateBitSet(inputsMask)) {
- startSlot = std::min(startSlot, slot);
- endSlot = std::max(endSlot, slot + 1);
- vertexBuffersInfo->d3d12BufferViews[slot].StrideInBytes =
- renderPipeline->GetInput(slot).stride;
- }
- }
-
- if (endSlot <= startSlot) {
- return;
- }
-
- // d3d12BufferViews is kept up to date with the most recent data passed
- // to SetVertexBuffers. This makes it correct to only track the start
- // and end of the dirty range. When FlushSetVertexBuffers is called,
- // we will at worst set non-dirty vertex buffers in duplicate.
- uint32_t count = endSlot - startSlot;
- commandList->IASetVertexBuffers(startSlot, count,
- &vertexBuffersInfo->d3d12BufferViews[startSlot]);
-
- vertexBuffersInfo->startSlot = kMaxVertexBuffers;
- vertexBuffersInfo->endSlot = 0;
- }
-
- void CommandBuffer::RecordComputePass(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void CommandBuffer::RecordComputePass(ID3D12GraphicsCommandList* commandList,
BindGroupStateTracker* bindingTracker) {
PipelineLayout* lastLayout = nullptr;
@@ -769,12 +790,15 @@ namespace dawn_native { namespace d3d12 {
switch (type) {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+ bindingTracker->Apply(commandList);
commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
} break;
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+ bindingTracker->Apply(commandList);
Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDispatchIndirectSignature();
@@ -796,7 +820,8 @@ namespace dawn_native { namespace d3d12 {
commandList->SetComputeRootSignature(layout->GetRootSignature().Get());
commandList->SetPipelineState(pipeline->GetPipelineState().Get());
- bindingTracker->SetInheritedBindGroups(commandList, lastLayout, layout);
+ bindingTracker->OnSetPipeline(pipeline);
+
lastLayout = layout;
} break;
@@ -809,8 +834,8 @@ namespace dawn_native { namespace d3d12 {
dynamicOffsets = mCommands.NextData<uint64_t>(cmd->dynamicOffsetCount);
}
- bindingTracker->SetBindGroup(commandList, lastLayout, group, cmd->index,
- cmd->dynamicOffsetCount, dynamicOffsets);
+ bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+ dynamicOffsets);
} break;
case Command::InsertDebugMarker: {
@@ -822,7 +847,7 @@ namespace dawn_native { namespace d3d12 {
constexpr uint64_t kPIXBlackColor = 0xff000000;
ToBackend(GetDevice())
->GetFunctions()
- ->pixSetMarkerOnCommandList(commandList.Get(), kPIXBlackColor, label);
+ ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
}
} break;
@@ -832,7 +857,7 @@ namespace dawn_native { namespace d3d12 {
if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
ToBackend(GetDevice())
->GetFunctions()
- ->pixEndEventOnCommandList(commandList.Get());
+ ->pixEndEventOnCommandList(commandList);
}
} break;
@@ -845,7 +870,7 @@ namespace dawn_native { namespace d3d12 {
constexpr uint64_t kPIXBlackColor = 0xff000000;
ToBackend(GetDevice())
->GetFunctions()
- ->pixBeginEventOnCommandList(commandList.Get(), kPIXBlackColor, label);
+ ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
}
} break;
@@ -854,11 +879,12 @@ namespace dawn_native { namespace d3d12 {
}
}
- void CommandBuffer::RecordRenderPass(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker,
RenderPassDescriptorHeapTracker* renderPassTracker,
BeginRenderPassCmd* renderPass) {
OMSetRenderTargetArgs args = renderPassTracker->GetSubpassOMSetRenderTargetArgs(renderPass);
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
// Clear framebuffer attachments as needed and transition to render target
{
@@ -886,14 +912,19 @@ namespace dawn_native { namespace d3d12 {
// color attachment, which will be correctly initialized.
ToBackend(resolveView->GetTexture())
->SetIsSubresourceContentInitialized(
- resolveView->GetBaseMipLevel(), resolveView->GetLevelCount(),
+ true, resolveView->GetBaseMipLevel(), resolveView->GetLevelCount(),
resolveView->GetBaseArrayLayer(), resolveView->GetLayerCount());
}
switch (attachmentInfo.storeOp) {
case dawn::StoreOp::Store: {
view->GetTexture()->SetIsSubresourceContentInitialized(
- view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
+ true, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
+ } break;
+
+ case dawn::StoreOp::Clear: {
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ false, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
} break;
default: { UNREACHABLE(); } break;
@@ -903,16 +934,11 @@ namespace dawn_native { namespace d3d12 {
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
auto& attachmentInfo = renderPass->depthStencilAttachment;
Texture* texture = ToBackend(renderPass->depthStencilAttachment.view->GetTexture());
- if ((texture->GetFormat().HasDepth() &&
- attachmentInfo.depthLoadOp == dawn::LoadOp::Load) ||
- (texture->GetFormat().HasStencil() &&
- attachmentInfo.stencilLoadOp == dawn::LoadOp::Load)) {
- texture->EnsureSubresourceContentInitialized(
- commandList, attachmentInfo.view->GetBaseMipLevel(),
- attachmentInfo.view->GetLevelCount(),
- attachmentInfo.view->GetBaseArrayLayer(),
- attachmentInfo.view->GetLayerCount());
- }
+ TextureView* view = ToBackend(attachmentInfo.view.Get());
+ float clearDepth = attachmentInfo.clearDepth;
+ // TODO(kainino@chromium.org): investigate: should the Dawn clear
+ // stencil type be uint8_t?
+ uint8_t clearStencil = static_cast<uint8_t>(attachmentInfo.clearStencil);
// Load op - depth/stencil
bool doDepthClear = texture->GetFormat().HasDepth() &&
@@ -927,19 +953,39 @@ namespace dawn_native { namespace d3d12 {
if (doStencilClear) {
clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
}
+ // If the depth/stencil texture has not been initialized, use a load-op
+ // clear to initialize its contents to zero.
+ if (!texture->IsSubresourceContentInitialized(
+ view->GetBaseMipLevel(), view->GetLevelCount(), view->GetBaseArrayLayer(),
+ view->GetLayerCount())) {
+ if (texture->GetFormat().HasDepth() &&
+ attachmentInfo.depthLoadOp == dawn::LoadOp::Load) {
+ clearDepth = 0.0f;
+ clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+ }
+ if (texture->GetFormat().HasStencil() &&
+ attachmentInfo.stencilLoadOp == dawn::LoadOp::Load) {
+ clearStencil = 0u;
+ clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
+ }
+ }
if (clearFlags) {
D3D12_CPU_DESCRIPTOR_HANDLE handle = args.dsv;
- // TODO(kainino@chromium.org): investigate: should the Dawn clear
- // stencil type be uint8_t?
- uint8_t clearStencil = static_cast<uint8_t>(attachmentInfo.clearStencil);
- commandList->ClearDepthStencilView(
- handle, clearFlags, attachmentInfo.clearDepth, clearStencil, 0, nullptr);
+ commandList->ClearDepthStencilView(handle, clearFlags, clearDepth, clearStencil,
+ 0, nullptr);
+ }
+
+ if (attachmentInfo.depthStoreOp == dawn::StoreOp::Store &&
+ attachmentInfo.stencilStoreOp == dawn::StoreOp::Store) {
texture->SetIsSubresourceContentInitialized(
- attachmentInfo.view->GetBaseMipLevel(),
- attachmentInfo.view->GetLevelCount(),
- attachmentInfo.view->GetBaseArrayLayer(),
- attachmentInfo.view->GetLayerCount());
+ true, view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount());
+ } else if (attachmentInfo.depthStoreOp == dawn::StoreOp::Clear &&
+ attachmentInfo.stencilStoreOp == dawn::StoreOp::Clear) {
+ texture->SetIsSubresourceContentInitialized(
+ false, view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount());
}
}
}
@@ -969,14 +1015,16 @@ namespace dawn_native { namespace d3d12 {
RenderPipeline* lastPipeline = nullptr;
PipelineLayout* lastLayout = nullptr;
- VertexBuffersInfo vertexBuffersInfo = {};
+ VertexBufferTracker vertexBufferTracker = {};
+ IndexBufferTracker indexBufferTracker = {};
auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
switch (type) {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
- FlushSetVertexBuffers(commandList, &vertexBuffersInfo, lastPipeline);
+ bindingTracker->Apply(commandList);
+ vertexBufferTracker.Apply(commandList, lastPipeline);
commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
draw->firstVertex, draw->firstInstance);
} break;
@@ -984,7 +1032,9 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
- FlushSetVertexBuffers(commandList, &vertexBuffersInfo, lastPipeline);
+ bindingTracker->Apply(commandList);
+ indexBufferTracker.Apply(commandList);
+ vertexBufferTracker.Apply(commandList, lastPipeline);
commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
draw->firstIndex, draw->baseVertex,
draw->firstInstance);
@@ -993,7 +1043,8 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- FlushSetVertexBuffers(commandList, &vertexBuffersInfo, lastPipeline);
+ bindingTracker->Apply(commandList);
+ vertexBufferTracker.Apply(commandList, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDrawIndirectSignature();
@@ -1005,7 +1056,9 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
- FlushSetVertexBuffers(commandList, &vertexBuffersInfo, lastPipeline);
+ bindingTracker->Apply(commandList);
+ indexBufferTracker.Apply(commandList);
+ vertexBufferTracker.Apply(commandList, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
@@ -1023,7 +1076,7 @@ namespace dawn_native { namespace d3d12 {
constexpr uint64_t kPIXBlackColor = 0xff000000;
ToBackend(GetDevice())
->GetFunctions()
- ->pixSetMarkerOnCommandList(commandList.Get(), kPIXBlackColor, label);
+ ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
}
} break;
@@ -1033,7 +1086,7 @@ namespace dawn_native { namespace d3d12 {
if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
ToBackend(GetDevice())
->GetFunctions()
- ->pixEndEventOnCommandList(commandList.Get());
+ ->pixEndEventOnCommandList(commandList);
}
} break;
@@ -1046,7 +1099,7 @@ namespace dawn_native { namespace d3d12 {
constexpr uint64_t kPIXBlackColor = 0xff000000;
ToBackend(GetDevice())
->GetFunctions()
- ->pixBeginEventOnCommandList(commandList.Get(), kPIXBlackColor, label);
+ ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
}
} break;
@@ -1059,7 +1112,8 @@ namespace dawn_native { namespace d3d12 {
commandList->SetPipelineState(pipeline->GetPipelineState().Get());
commandList->IASetPrimitiveTopology(pipeline->GetD3D12PrimitiveTopology());
- bindingTracker->SetInheritedBindGroups(commandList, lastLayout, layout);
+ bindingTracker->OnSetPipeline(pipeline);
+ indexBufferTracker.OnSetPipeline(pipeline);
lastPipeline = pipeline;
lastLayout = layout;
@@ -1074,44 +1128,21 @@ namespace dawn_native { namespace d3d12 {
dynamicOffsets = iter->NextData<uint64_t>(cmd->dynamicOffsetCount);
}
- bindingTracker->SetBindGroup(commandList, lastLayout, group, cmd->index,
- cmd->dynamicOffsetCount, dynamicOffsets);
+ bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+ dynamicOffsets);
} break;
case Command::SetIndexBuffer: {
SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
- Buffer* buffer = ToBackend(cmd->buffer.Get());
- D3D12_INDEX_BUFFER_VIEW bufferView;
- bufferView.BufferLocation = buffer->GetVA() + cmd->offset;
- bufferView.SizeInBytes = buffer->GetSize() - cmd->offset;
- // TODO(cwallez@chromium.org): Make index buffers lazily applied, right now
- // this will break if the pipeline is changed for one with a different index
- // format after SetIndexBuffer
- bufferView.Format =
- DXGIIndexFormat(lastPipeline->GetVertexInputDescriptor()->indexFormat);
-
- commandList->IASetIndexBuffer(&bufferView);
+ indexBufferTracker.OnSetIndexBuffer(ToBackend(cmd->buffer.Get()), cmd->offset);
} break;
- case Command::SetVertexBuffers: {
- SetVertexBuffersCmd* cmd = iter->NextCommand<SetVertexBuffersCmd>();
- auto buffers = iter->NextData<Ref<BufferBase>>(cmd->count);
- auto offsets = iter->NextData<uint64_t>(cmd->count);
-
- vertexBuffersInfo.startSlot =
- std::min(vertexBuffersInfo.startSlot, cmd->startSlot);
- vertexBuffersInfo.endSlot =
- std::max(vertexBuffersInfo.endSlot, cmd->startSlot + cmd->count);
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- for (uint32_t i = 0; i < cmd->count; ++i) {
- Buffer* buffer = ToBackend(buffers[i].Get());
- auto* d3d12BufferView =
- &vertexBuffersInfo.d3d12BufferViews[cmd->startSlot + i];
- d3d12BufferView->BufferLocation = buffer->GetVA() + offsets[i];
- d3d12BufferView->SizeInBytes = buffer->GetSize() - offsets[i];
- // The bufferView stride is set based on the input state before a draw.
- }
+ vertexBufferTracker.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+ cmd->offset);
} break;
default:
@@ -1129,7 +1160,7 @@ namespace dawn_native { namespace d3d12 {
// TODO(brandon1.jones@intel.com): avoid calling this function and enable MSAA
// resolve in D3D12 render pass on the platforms that support this feature.
if (renderPass->attachmentState->GetSampleCount() > 1) {
- ResolveMultisampledRenderPass(commandList, renderPass);
+ ResolveMultisampledRenderPass(commandContext, renderPass);
}
return;
} break;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
index a367b4b48dd..ce7f451e256 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
@@ -18,6 +18,7 @@
#include "common/Constants.h"
#include "dawn_native/CommandAllocator.h"
#include "dawn_native/CommandBuffer.h"
+#include "dawn_native/Error.h"
#include "dawn_native/d3d12/Forward.h"
#include "dawn_native/d3d12/d3d12_platform.h"
@@ -31,35 +32,22 @@ namespace dawn_native {
namespace dawn_native { namespace d3d12 {
class BindGroupStateTracker;
+ class CommandRecordingContext;
class Device;
class RenderPassDescriptorHeapTracker;
class RenderPipeline;
- struct VertexBuffersInfo {
- // startSlot and endSlot indicate the range of dirty vertex buffers.
- // If there are multiple calls to SetVertexBuffers, the start and end
- // represent the union of the dirty ranges (the union may have non-dirty
- // data in the middle of the range).
- const RenderPipeline* lastRenderPipeline = nullptr;
- uint32_t startSlot = kMaxVertexBuffers;
- uint32_t endSlot = 0;
- std::array<D3D12_VERTEX_BUFFER_VIEW, kMaxVertexBuffers> d3d12BufferViews = {};
- };
-
class CommandBuffer : public CommandBufferBase {
public:
CommandBuffer(CommandEncoderBase* encoder, const CommandBufferDescriptor* descriptor);
~CommandBuffer();
- void RecordCommands(ComPtr<ID3D12GraphicsCommandList> commandList, uint32_t indexInSubmit);
+ MaybeError RecordCommands(CommandRecordingContext* commandContext, uint32_t indexInSubmit);
private:
- void FlushSetVertexBuffers(ComPtr<ID3D12GraphicsCommandList> commandList,
- VertexBuffersInfo* vertexBuffersInfo,
- const RenderPipeline* lastRenderPipeline);
- void RecordComputePass(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void RecordComputePass(ID3D12GraphicsCommandList* commandList,
BindGroupStateTracker* bindingTracker);
- void RecordRenderPass(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void RecordRenderPass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker,
RenderPassDescriptorHeapTracker* renderPassTracker,
BeginRenderPassCmd* renderPass);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
new file mode 100644
index 00000000000..4d927b8322c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
@@ -0,0 +1,73 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "dawn_native/d3d12/CommandRecordingContext.h"
+#include "dawn_native/d3d12/CommandAllocatorManager.h"
+#include "dawn_native/d3d12/D3D12Error.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ MaybeError CommandRecordingContext::Open(ID3D12Device* d3d12Device,
+ CommandAllocatorManager* commandAllocationManager) {
+ ASSERT(!IsOpen());
+ ID3D12CommandAllocator* commandAllocator;
+ DAWN_TRY_ASSIGN(commandAllocator, commandAllocationManager->ReserveCommandAllocator());
+ if (mD3d12CommandList != nullptr) {
+ MaybeError error = CheckHRESULT(mD3d12CommandList->Reset(commandAllocator, nullptr),
+ "D3D12 resetting command list");
+ if (error.IsError()) {
+ mD3d12CommandList.Reset();
+ DAWN_TRY(std::move(error));
+ }
+ } else {
+ ComPtr<ID3D12GraphicsCommandList> d3d12GraphicsCommandList;
+ DAWN_TRY(CheckHRESULT(
+ d3d12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, commandAllocator,
+ nullptr, IID_PPV_ARGS(&d3d12GraphicsCommandList)),
+ "D3D12 creating direct command list"));
+ mD3d12CommandList = std::move(d3d12GraphicsCommandList);
+ }
+
+ mIsOpen = true;
+
+ return {};
+ }
+
+ ResultOrError<ID3D12GraphicsCommandList*> CommandRecordingContext::Close() {
+ ASSERT(IsOpen());
+ mIsOpen = false;
+ MaybeError error =
+ CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
+ if (error.IsError()) {
+ mD3d12CommandList.Reset();
+ DAWN_TRY(std::move(error));
+ }
+ return mD3d12CommandList.Get();
+ }
+
+ ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
+ ASSERT(mD3d12CommandList != nullptr);
+ ASSERT(IsOpen());
+ return mD3d12CommandList.Get();
+ }
+
+ void CommandRecordingContext::Release() {
+ mD3d12CommandList.Reset();
+ mIsOpen = false;
+ }
+
+ bool CommandRecordingContext::IsOpen() const {
+ return mIsOpen;
+ }
+
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
new file mode 100644
index 00000000000..544dae97354
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
@@ -0,0 +1,38 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
+#define DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
+
+#include "dawn_native/Error.h"
+#include "dawn_native/d3d12/d3d12_platform.h"
+
+namespace dawn_native { namespace d3d12 {
+ class CommandAllocatorManager;
+
+ class CommandRecordingContext {
+ public:
+ MaybeError Open(ID3D12Device* d3d12Device,
+ CommandAllocatorManager* commandAllocationManager);
+ ResultOrError<ID3D12GraphicsCommandList*> Close();
+ ID3D12GraphicsCommandList* GetCommandList() const;
+ void Release();
+ bool IsOpen() const;
+
+ private:
+ ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
+ bool mIsOpen = false;
+ };
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
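For orientation, here is a minimal usage sketch of the new CommandRecordingContext, assuming a d3d12::Device wired up as in this patch (GetD3D12Device(), GetCommandAllocatorManager(), ExecuteCommandContext() and the DAWN_TRY macros); the function name is illustrative and the sketch is not part of the patch:

    // Hedged sketch: mirrors how DeviceD3D12.cpp and QueueD3D12.cpp drive the class.
    MaybeError RecordAndSubmitSketch(Device* device) {
        CommandRecordingContext context;
        // Open() lazily creates or resets the underlying ID3D12GraphicsCommandList
        // against a command allocator reserved for the pending serial.
        DAWN_TRY(context.Open(device->GetD3D12Device().Get(),
                              device->GetCommandAllocatorManager()));

        // While the context is open, GetCommandList() exposes the raw list for recording.
        ID3D12GraphicsCommandList* commandList = context.GetCommandList();
        commandList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);

        // ExecuteCommandContext() closes the context and submits it together with any
        // commands the device already had pending, then the serial is advanced.
        DAWN_TRY(device->ExecuteCommandContext(&context));
        return device->NextSerial();
    }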
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.cpp
index 772f5d21f4c..9a55e690b2a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.cpp
@@ -14,7 +14,6 @@
#include "dawn_native/d3d12/CommittedResourceAllocatorD3D12.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/ResourceHeapD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -22,7 +21,7 @@ namespace dawn_native { namespace d3d12 {
: mDevice(device), mHeapType(heapType) {
}
- ResultOrError<ResourceMemoryAllocation> CommittedResourceAllocator::Allocate(
+ ResultOrError<ResourceHeapAllocation> CommittedResourceAllocator::Allocate(
const D3D12_RESOURCE_DESC& resourceDescriptor,
D3D12_RESOURCE_STATES initialUsage,
D3D12_HEAP_FLAGS heapFlags) {
@@ -40,13 +39,14 @@ namespace dawn_native { namespace d3d12 {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate resource");
}
- return ResourceMemoryAllocation(
- /*offset*/ 0, new ResourceHeap(std::move(committedResource)),
- AllocationMethod::kDirect);
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+
+ return ResourceHeapAllocation{info,
+ /*offset*/ 0, std::move(committedResource)};
}
- void CommittedResourceAllocator::Deallocate(ResourceMemoryAllocation& allocation) {
- std::unique_ptr<ResourceHeap> resourceHeap(ToBackend(allocation.GetResourceHeap()));
- mDevice->ReferenceUntilUnused(resourceHeap->GetD3D12Resource());
+ void CommittedResourceAllocator::Deallocate(ResourceHeapAllocation& allocation) {
+ mDevice->ReferenceUntilUnused(allocation.GetD3D12Resource());
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.h
index 419d1c6cbd9..7bfb9d8b420 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.h
@@ -17,7 +17,7 @@
#include "common/SerialQueue.h"
#include "dawn_native/Error.h"
-#include "dawn_native/ResourceMemoryAllocation.h"
+#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
@@ -31,11 +31,11 @@ namespace dawn_native { namespace d3d12 {
CommittedResourceAllocator(Device* device, D3D12_HEAP_TYPE heapType);
~CommittedResourceAllocator() = default;
- ResultOrError<ResourceMemoryAllocation> Allocate(
+ ResultOrError<ResourceHeapAllocation> Allocate(
const D3D12_RESOURCE_DESC& resourceDescriptor,
D3D12_RESOURCE_STATES initialUsage,
D3D12_HEAP_FLAGS heapFlags);
- void Deallocate(ResourceMemoryAllocation& allocation);
+ void Deallocate(ResourceHeapAllocation& allocation);
private:
Device* mDevice;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
index 5aec4b3256b..38bde463f15 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
@@ -12,19 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn_native/d3d12/ResourceHeapD3D12.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
-namespace dawn_native { namespace d3d12 {
+#include <string>
- ResourceHeap::ResourceHeap(ComPtr<ID3D12Resource> resource) : mResource(resource) {
- }
+namespace dawn_native { namespace d3d12 {
+ MaybeError CheckHRESULT(HRESULT result, const char* context) {
+ if (DAWN_LIKELY(SUCCEEDED(result))) {
+ return {};
+ }
- ComPtr<ID3D12Resource> ResourceHeap::GetD3D12Resource() const {
- return mResource;
+ std::string message = std::string(context) + " failed with " + std::to_string(result);
+ return DAWN_DEVICE_LOST_ERROR(message);
}
- D3D12_GPU_VIRTUAL_ADDRESS ResourceHeap::GetGPUPointer() const {
- return mResource->GetGPUVirtualAddress();
- }
}} // namespace dawn_native::d3d12
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
new file mode 100644
index 00000000000..b5f5eb73800
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
@@ -0,0 +1,28 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_D3D12ERROR_H_
+#define DAWNNATIVE_D3D12_D3D12ERROR_H_
+
+#include <d3d12.h>
+#include "dawn_native/Error.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ // Returns success only if the HRESULT indicates success.
+ MaybeError CheckHRESULT(HRESULT result, const char* context);
+
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_D3D12ERROR_H_
\ No newline at end of file
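The helper above is typically consumed through DAWN_TRY so that a failing HRESULT becomes a propagated device-lost error, as the Device and PipelineLayout changes below do. A small hedged sketch (the function name is illustrative):

    // Hedged sketch: wrap an HRESULT-returning D3D12 call in Dawn's error plumbing.
    MaybeError SignalFenceSketch(ID3D12CommandQueue* queue, ID3D12Fence* fence, uint64_t serial) {
        // CheckHRESULT() returns {} on success and DAWN_DEVICE_LOST_ERROR otherwise;
        // DAWN_TRY early-returns that error to the caller.
        DAWN_TRY(CheckHRESULT(queue->Signal(fence, serial),
                              "D3D12 command queue signal fence"));
        return {};
    }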
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp
index 75d188db01e..a69641a152b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp
@@ -61,29 +61,19 @@ namespace dawn_native { namespace d3d12 {
} {
}
- DescriptorHeapHandle DescriptorHeapAllocator::Allocate(D3D12_DESCRIPTOR_HEAP_TYPE type,
- uint32_t count,
- uint32_t allocationSize,
- DescriptorHeapInfo* heapInfo,
- D3D12_DESCRIPTOR_HEAP_FLAGS flags) {
- // TODO(enga@google.com): This is just a linear allocator so the heap will quickly run out
- // of space causing a new one to be allocated We should reuse heap subranges that have been
- // released
- if (count == 0) {
- return DescriptorHeapHandle();
- }
-
- {
- // If the current pool for this type has space, linearly allocate count bytes in the
- // pool
- auto& allocationInfo = heapInfo->second;
- if (allocationInfo.remaining >= count) {
- DescriptorHeapHandle handle(heapInfo->first, mSizeIncrements[type],
- allocationInfo.size - allocationInfo.remaining);
- allocationInfo.remaining -= count;
- Release(handle);
- return handle;
- }
+ ResultOrError<DescriptorHeapHandle> DescriptorHeapAllocator::Allocate(
+ D3D12_DESCRIPTOR_HEAP_TYPE type,
+ uint32_t count,
+ uint32_t allocationSize,
+ DescriptorHeapInfo* heapInfo,
+ D3D12_DESCRIPTOR_HEAP_FLAGS flags) {
+ const Serial pendingSerial = mDevice->GetPendingCommandSerial();
+ size_t startOffset = (heapInfo->heap == nullptr)
+ ? RingBufferAllocator::kInvalidOffset
+ : heapInfo->allocator.Allocate(count, pendingSerial);
+ if (startOffset != RingBufferAllocator::kInvalidOffset) {
+ return DescriptorHeapHandle{heapInfo->heap, mSizeIncrements[type],
+ static_cast<uint32_t>(startOffset)};
}
// If the pool has no more space, replace the pool with a new one of the specified size
@@ -94,39 +84,52 @@ namespace dawn_native { namespace d3d12 {
heapDescriptor.Flags = flags;
heapDescriptor.NodeMask = 0;
ComPtr<ID3D12DescriptorHeap> heap;
- ASSERT_SUCCESS(
- mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)));
+ if (FAILED(mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor,
+ IID_PPV_ARGS(&heap)))) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate heap");
+ }
- AllocationInfo allocationInfo = {allocationSize, allocationSize - count};
- *heapInfo = std::make_pair(heap, allocationInfo);
+ mDevice->ReferenceUntilUnused(heap);
- DescriptorHeapHandle handle(heap, mSizeIncrements[type], 0);
- Release(handle);
- return handle;
+ *heapInfo = {heap, RingBufferAllocator(allocationSize)};
+
+ startOffset = heapInfo->allocator.Allocate(count, pendingSerial);
+
+ ASSERT(startOffset != RingBufferAllocator::kInvalidOffset);
+
+ return DescriptorHeapHandle(heap, mSizeIncrements[type], startOffset);
}
- DescriptorHeapHandle DescriptorHeapAllocator::AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE type,
- uint32_t count) {
+ ResultOrError<DescriptorHeapHandle> DescriptorHeapAllocator::AllocateCPUHeap(
+ D3D12_DESCRIPTOR_HEAP_TYPE type,
+ uint32_t count) {
return Allocate(type, count, count, &mCpuDescriptorHeapInfos[type],
D3D12_DESCRIPTOR_HEAP_FLAG_NONE);
}
- DescriptorHeapHandle DescriptorHeapAllocator::AllocateGPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE type,
- uint32_t count) {
+ ResultOrError<DescriptorHeapHandle> DescriptorHeapAllocator::AllocateGPUHeap(
+ D3D12_DESCRIPTOR_HEAP_TYPE type,
+ uint32_t count) {
ASSERT(type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
type == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
- unsigned int heapSize =
- (type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ? kMaxCbvUavSrvHeapSize
- : kMaxSamplerHeapSize);
+ unsigned int heapSize = (type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV
+ ? D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1
+ : D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE);
return Allocate(type, count, heapSize, &mGpuDescriptorHeapInfos[type],
D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE);
}
- void DescriptorHeapAllocator::Tick(uint64_t lastCompletedSerial) {
- mReleasedHandles.ClearUpTo(lastCompletedSerial);
- }
+ void DescriptorHeapAllocator::Deallocate(uint64_t lastCompletedSerial) {
+ for (uint32_t i = 0; i < mCpuDescriptorHeapInfos.size(); i++) {
+ if (mCpuDescriptorHeapInfos[i].heap != nullptr) {
+ mCpuDescriptorHeapInfos[i].allocator.Deallocate(lastCompletedSerial);
+ }
+ }
- void DescriptorHeapAllocator::Release(DescriptorHeapHandle handle) {
- mReleasedHandles.Enqueue(handle, mDevice->GetPendingCommandSerial());
+ for (uint32_t i = 0; i < mGpuDescriptorHeapInfos.size(); i++) {
+ if (mGpuDescriptorHeapInfos[i].heap != nullptr) {
+ mGpuDescriptorHeapInfos[i].allocator.Deallocate(lastCompletedSerial);
+ }
+ }
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h
index f1ba5029f87..e4949a68cd7 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h
@@ -21,6 +21,9 @@
#include <vector>
#include "common/SerialQueue.h"
+#include "dawn_native/Error.h"
+#include "dawn_native/RingBufferAllocator.h"
+
namespace dawn_native { namespace d3d12 {
class Device;
@@ -46,36 +49,31 @@ namespace dawn_native { namespace d3d12 {
public:
DescriptorHeapAllocator(Device* device);
- DescriptorHeapHandle AllocateGPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE type, uint32_t count);
- DescriptorHeapHandle AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE type, uint32_t count);
- void Tick(uint64_t lastCompletedSerial);
+ ResultOrError<DescriptorHeapHandle> AllocateGPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE type,
+ uint32_t count);
+ ResultOrError<DescriptorHeapHandle> AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE type,
+ uint32_t count);
+ void Deallocate(uint64_t lastCompletedSerial);
private:
- static constexpr unsigned int kMaxCbvUavSrvHeapSize = 1000000;
- static constexpr unsigned int kMaxSamplerHeapSize = 2048;
- static constexpr unsigned int kDescriptorHeapTypes =
- D3D12_DESCRIPTOR_HEAP_TYPE::D3D12_DESCRIPTOR_HEAP_TYPE_NUM_TYPES;
-
- struct AllocationInfo {
- uint32_t size = 0;
- uint32_t remaining = 0;
+ struct DescriptorHeapInfo {
+ ComPtr<ID3D12DescriptorHeap> heap;
+ RingBufferAllocator allocator;
};
- using DescriptorHeapInfo = std::pair<ComPtr<ID3D12DescriptorHeap>, AllocationInfo>;
-
- DescriptorHeapHandle Allocate(D3D12_DESCRIPTOR_HEAP_TYPE type,
- uint32_t count,
- uint32_t allocationSize,
- DescriptorHeapInfo* heapInfo,
- D3D12_DESCRIPTOR_HEAP_FLAGS flags);
- void Release(DescriptorHeapHandle handle);
+ ResultOrError<DescriptorHeapHandle> Allocate(D3D12_DESCRIPTOR_HEAP_TYPE type,
+ uint32_t count,
+ uint32_t allocationSize,
+ DescriptorHeapInfo* heapInfo,
+ D3D12_DESCRIPTOR_HEAP_FLAGS flags);
Device* mDevice;
- std::array<uint32_t, kDescriptorHeapTypes> mSizeIncrements;
- std::array<DescriptorHeapInfo, kDescriptorHeapTypes> mCpuDescriptorHeapInfos;
- std::array<DescriptorHeapInfo, kDescriptorHeapTypes> mGpuDescriptorHeapInfos;
- SerialQueue<DescriptorHeapHandle> mReleasedHandles;
+ std::array<uint32_t, D3D12_DESCRIPTOR_HEAP_TYPE_NUM_TYPES> mSizeIncrements;
+ std::array<DescriptorHeapInfo, D3D12_DESCRIPTOR_HEAP_TYPE_NUM_TYPES>
+ mCpuDescriptorHeapInfos;
+ std::array<DescriptorHeapInfo, D3D12_DESCRIPTOR_HEAP_TYPE_NUM_TYPES>
+ mGpuDescriptorHeapInfos;
};
}} // namespace dawn_native::d3d12
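The reworked allocator above sub-allocates descriptors from a RingBufferAllocator keyed on the pending command serial and reclaims the space once that serial has completed. A hedged sketch of the intended call pattern (the function name and descriptor count are illustrative):

    // Hedged sketch: allocate GPU-visible descriptors for the current submission,
    // then let the ring buffer reclaim them after the fence passes that serial.
    MaybeError UseDescriptorsSketch(Device* device, DescriptorHeapAllocator* allocator) {
        DescriptorHeapHandle handle;
        // The allocation is tagged with the pending serial inside AllocateGPUHeap().
        DAWN_TRY_ASSIGN(handle, allocator->AllocateGPUHeap(
                                    D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, 4));
        // ... write descriptors through |handle| and bind the heap ...

        // Once the fence has advanced, Deallocate() frees every allocation whose
        // serial is now complete.
        allocator->Deallocate(device->GetCompletedCommandSerial());
        return {};
    }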
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
index e925a771b7f..f1b2e3120fa 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
@@ -25,13 +25,14 @@
#include "dawn_native/d3d12/CommandAllocatorManager.h"
#include "dawn_native/d3d12/CommandBufferD3D12.h"
#include "dawn_native/d3d12/ComputePipelineD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DescriptorHeapAllocator.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
#include "dawn_native/d3d12/QueueD3D12.h"
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
#include "dawn_native/d3d12/ResourceAllocator.h"
-#include "dawn_native/d3d12/ResourceHeapD3D12.h"
+#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
#include "dawn_native/d3d12/ShaderModuleD3D12.h"
#include "dawn_native/d3d12/StagingBufferD3D12.h"
@@ -40,10 +41,6 @@
namespace dawn_native { namespace d3d12 {
- void ASSERT_SUCCESS(HRESULT hr) {
- ASSERT(SUCCEEDED(hr));
- }
-
Device::Device(Adapter* adapter, const DeviceDescriptor* descriptor)
: DeviceBase(adapter, descriptor) {
if (descriptor != nullptr) {
@@ -60,10 +57,14 @@ namespace dawn_native { namespace d3d12 {
D3D12_COMMAND_QUEUE_DESC queueDesc = {};
queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
- ASSERT_SUCCESS(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)));
+ DAWN_TRY(
+ CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
+ "D3D12 create command queue"));
+
+ DAWN_TRY(CheckHRESULT(mD3d12Device->CreateFence(mLastSubmittedSerial, D3D12_FENCE_FLAG_NONE,
+ IID_PPV_ARGS(&mFence)),
+ "D3D12 create fence"));
- ASSERT_SUCCESS(mD3d12Device->CreateFence(mLastSubmittedSerial, D3D12_FENCE_FLAG_NONE,
- IID_PPV_ARGS(&mFence)));
mFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
ASSERT(mFenceEvent != nullptr);
@@ -72,8 +73,9 @@ namespace dawn_native { namespace d3d12 {
mDescriptorHeapAllocator = std::make_unique<DescriptorHeapAllocator>(this);
mMapRequestTracker = std::make_unique<MapRequestTracker>(this);
mResourceAllocator = std::make_unique<ResourceAllocator>(this);
+ mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
- NextSerial();
+ DAWN_TRY(NextSerial());
// Initialize indirect commands
D3D12_INDIRECT_ARGUMENT_DESC argumentDesc = {};
@@ -104,14 +106,15 @@ namespace dawn_native { namespace d3d12 {
Device::~Device() {
// Immediately forget about all pending commands
- if (mPendingCommands.open) {
- mPendingCommands.commandList->Close();
- mPendingCommands.open = false;
- mPendingCommands.commandList = nullptr;
- }
- NextSerial();
- WaitForSerial(mLastSubmittedSerial); // Wait for all in-flight commands to finish executing
- TickImpl(); // Call tick one last time so resources are cleaned up
+ mPendingCommands.Release();
+
+ ConsumedError(NextSerial());
+ // Wait for all in-flight commands to finish executing
+ ConsumedError(WaitForSerial(mLastSubmittedSerial));
+
+ // Call tick one last time so resources are cleaned up. Ignore the return value so we can
+ // continue shutting down in an orderly fashion.
+ ConsumedError(TickImpl());
// Free services explicitly so that they can free D3D12 resources before destruction of the
// device.
@@ -133,7 +136,7 @@ namespace dawn_native { namespace d3d12 {
mUsedComObjectRefs.ClearUpTo(mCompletedSerial);
ASSERT(mUsedComObjectRefs.Empty());
- ASSERT(mPendingCommands.commandList == nullptr);
+ ASSERT(!mPendingCommands.IsOpen());
}
ComPtr<ID3D12Device> Device::GetD3D12Device() const {
@@ -176,27 +179,17 @@ namespace dawn_native { namespace d3d12 {
return mResourceAllocator.get();
}
- void Device::OpenCommandList(ComPtr<ID3D12GraphicsCommandList>* commandList) {
- ComPtr<ID3D12GraphicsCommandList>& cmdList = *commandList;
- if (!cmdList) {
- ASSERT_SUCCESS(mD3d12Device->CreateCommandList(
- 0, D3D12_COMMAND_LIST_TYPE_DIRECT,
- mCommandAllocatorManager->ReserveCommandAllocator().Get(), nullptr,
- IID_PPV_ARGS(&cmdList)));
- } else {
- ASSERT_SUCCESS(
- cmdList->Reset(mCommandAllocatorManager->ReserveCommandAllocator().Get(), nullptr));
- }
+ CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
+ return mCommandAllocatorManager.get();
}
- ComPtr<ID3D12GraphicsCommandList> Device::GetPendingCommandList() {
+ ResultOrError<CommandRecordingContext*> Device::GetPendingCommandContext() {
// Callers of GetPendingCommandList do so to record commands. Only reserve a command
// allocator when it is needed so we don't submit empty command lists
- if (!mPendingCommands.open) {
- OpenCommandList(&mPendingCommands.commandList);
- mPendingCommands.open = true;
+ if (!mPendingCommands.IsOpen()) {
+ DAWN_TRY(mPendingCommands.Open(mD3d12Device.Get(), mCommandAllocatorManager.get()));
}
- return mPendingCommands.commandList;
+ return &mPendingCommands;
}
Serial Device::GetCompletedCommandSerial() const {
@@ -211,56 +204,64 @@ namespace dawn_native { namespace d3d12 {
return mLastSubmittedSerial + 1;
}
- void Device::TickImpl() {
+ MaybeError Device::TickImpl() {
// Perform cleanup operations to free unused objects
mCompletedSerial = mFence->GetCompletedValue();
// Uploader should tick before the resource allocator
- // as it enqueues resources to be released.
- mDynamicUploader->Tick(mCompletedSerial);
+ // as it enqueues resources to be released.
+ mDynamicUploader->Deallocate(mCompletedSerial);
mResourceAllocator->Tick(mCompletedSerial);
- mCommandAllocatorManager->Tick(mCompletedSerial);
- mDescriptorHeapAllocator->Tick(mCompletedSerial);
+ DAWN_TRY(mCommandAllocatorManager->Tick(mCompletedSerial));
+ mDescriptorHeapAllocator->Deallocate(mCompletedSerial);
mMapRequestTracker->Tick(mCompletedSerial);
mUsedComObjectRefs.ClearUpTo(mCompletedSerial);
- ExecuteCommandLists({});
- NextSerial();
+ DAWN_TRY(ExecuteCommandContext(nullptr));
+ DAWN_TRY(NextSerial());
+ return {};
}
- void Device::NextSerial() {
+ MaybeError Device::NextSerial() {
mLastSubmittedSerial++;
- ASSERT_SUCCESS(mCommandQueue->Signal(mFence.Get(), mLastSubmittedSerial));
+ return CheckHRESULT(mCommandQueue->Signal(mFence.Get(), mLastSubmittedSerial),
+ "D3D12 command queue signal fence");
}
- void Device::WaitForSerial(uint64_t serial) {
+ MaybeError Device::WaitForSerial(uint64_t serial) {
mCompletedSerial = mFence->GetCompletedValue();
if (mCompletedSerial < serial) {
- ASSERT_SUCCESS(mFence->SetEventOnCompletion(serial, mFenceEvent));
+ DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(serial, mFenceEvent),
+ "D3D12 set event on completion"));
WaitForSingleObject(mFenceEvent, INFINITE);
}
+ return {};
}
void Device::ReferenceUntilUnused(ComPtr<IUnknown> object) {
mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
}
- void Device::ExecuteCommandLists(std::initializer_list<ID3D12CommandList*> commandLists) {
+ MaybeError Device::ExecuteCommandContext(CommandRecordingContext* commandContext) {
+ UINT numLists = 0;
+ std::array<ID3D12CommandList*, 2> d3d12CommandLists;
+
// If there are pending commands, prepend them to ExecuteCommandLists
- if (mPendingCommands.open) {
- std::vector<ID3D12CommandList*> lists(commandLists.size() + 1);
- mPendingCommands.commandList->Close();
- mPendingCommands.open = false;
- lists[0] = mPendingCommands.commandList.Get();
- std::copy(commandLists.begin(), commandLists.end(), lists.begin() + 1);
- mCommandQueue->ExecuteCommandLists(static_cast<UINT>(commandLists.size() + 1),
- lists.data());
- mPendingCommands.commandList = nullptr;
- } else {
- std::vector<ID3D12CommandList*> lists(commandLists);
- mCommandQueue->ExecuteCommandLists(static_cast<UINT>(commandLists.size()),
- lists.data());
+ if (mPendingCommands.IsOpen()) {
+ ID3D12GraphicsCommandList* d3d12CommandList;
+ DAWN_TRY_ASSIGN(d3d12CommandList, mPendingCommands.Close());
+ d3d12CommandLists[numLists++] = d3d12CommandList;
+ }
+ if (commandContext != nullptr) {
+ ID3D12GraphicsCommandList* d3d12CommandList;
+ DAWN_TRY_ASSIGN(d3d12CommandList, commandContext->Close());
+ d3d12CommandLists[numLists++] = d3d12CommandList;
}
+ if (numLists > 0) {
+ mCommandQueue->ExecuteCommandLists(numLists, d3d12CommandLists.data());
+ }
+
+ return {};
}
ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
@@ -286,14 +287,14 @@ namespace dawn_native { namespace d3d12 {
}
ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) {
- return new PipelineLayout(this, descriptor);
+ return PipelineLayout::Create(this, descriptor);
}
ResultOrError<QueueBase*> Device::CreateQueueImpl() {
return new Queue(this);
}
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
- return new RenderPipeline(this, descriptor);
+ return RenderPipeline::Create(this, descriptor);
}
ResultOrError<SamplerBase*> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
return new Sampler(this, descriptor);
@@ -307,7 +308,7 @@ namespace dawn_native { namespace d3d12 {
return new SwapChain(this, descriptor);
}
ResultOrError<TextureBase*> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return new Texture(this, descriptor);
+ return Texture::Create(this, descriptor);
}
ResultOrError<TextureViewBase*> Device::CreateTextureViewImpl(
TextureBase* texture,
@@ -318,6 +319,7 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
std::unique_ptr<StagingBufferBase> stagingBuffer =
std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
return std::move(stagingBuffer);
}
@@ -326,59 +328,30 @@ namespace dawn_native { namespace d3d12 {
BufferBase* destination,
uint64_t destinationOffset,
uint64_t size) {
+ CommandRecordingContext* commandRecordingContext;
+ DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
+
ToBackend(destination)
- ->TransitionUsageNow(GetPendingCommandList(), dawn::BufferUsage::CopyDst);
+ ->TransitionUsageNow(commandRecordingContext, dawn::BufferUsage::CopyDst);
- GetPendingCommandList()->CopyBufferRegion(
+ commandRecordingContext->GetCommandList()->CopyBufferRegion(
ToBackend(destination)->GetD3D12Resource().Get(), destinationOffset,
ToBackend(source)->GetResource(), sourceOffset, size);
return {};
}
- size_t Device::GetD3D12HeapTypeToIndex(D3D12_HEAP_TYPE heapType) const {
- ASSERT(heapType > 0);
- ASSERT(static_cast<uint32_t>(heapType) <= kNumHeapTypes);
- return heapType - 1;
- }
-
- void Device::DeallocateMemory(ResourceMemoryAllocation& allocation) {
- CommittedResourceAllocator* allocator = nullptr;
- D3D12_HEAP_PROPERTIES heapProp;
- ToBackend(allocation.GetResourceHeap())
- ->GetD3D12Resource()
- ->GetHeapProperties(&heapProp, nullptr);
- const size_t heapTypeIndex = GetD3D12HeapTypeToIndex(heapProp.Type);
- ASSERT(heapTypeIndex < kNumHeapTypes);
- allocator = mDirectResourceAllocators[heapTypeIndex].get();
- allocator->Deallocate(allocation);
-
- // Invalidate the underlying resource heap in case the client accidentally
- // calls DeallocateMemory again using the same allocation.
- allocation.Invalidate();
+ void Device::DeallocateMemory(ResourceHeapAllocation& allocation) {
+ mResourceAllocatorManager->DeallocateMemory(allocation);
}
- ResultOrError<ResourceMemoryAllocation> Device::AllocateMemory(
+ ResultOrError<ResourceHeapAllocation> Device::AllocateMemory(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
D3D12_RESOURCE_STATES initialUsage,
D3D12_HEAP_FLAGS heapFlags) {
- const size_t heapTypeIndex = GetD3D12HeapTypeToIndex(heapType);
- ASSERT(heapTypeIndex < kNumHeapTypes);
-
- // Get the direct allocator using a tightly sized heap (aka CreateCommittedResource).
- CommittedResourceAllocator* allocator = mDirectResourceAllocators[heapTypeIndex].get();
- if (allocator == nullptr) {
- mDirectResourceAllocators[heapTypeIndex] =
- std::make_unique<CommittedResourceAllocator>(this, heapType);
- allocator = mDirectResourceAllocators[heapTypeIndex].get();
- }
-
- ResourceMemoryAllocation allocation;
- DAWN_TRY_ASSIGN(allocation,
- allocator->Allocate(resourceDescriptor, initialUsage, heapFlags));
-
- return allocation;
+ return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor, initialUsage,
+ heapFlags);
}
TextureBase* Device::WrapSharedHandle(const TextureDescriptor* descriptor,
@@ -402,6 +375,6 @@ namespace dawn_native { namespace d3d12 {
return nullptr;
}
- return new Texture(this, descriptor, d3d12Resource.Get());
+ return new Texture(this, descriptor, std::move(d3d12Resource));
}
}} // namespace dawn_native::d3d12
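Note how ExecuteCommandContext() above folds the device's own pending command list into the same ExecuteCommandLists() call, and how passing nullptr (as TickImpl() does) simply flushes whatever is pending. A hedged sketch of the two call shapes (the function name is illustrative):

    // Hedged sketch: the two ways ExecuteCommandContext() is used in this patch.
    MaybeError FlushSketch(Device* device, CommandRecordingContext* recordedCommands) {
        // Submit explicitly recorded commands; pending device commands are prepended
        // so their ordering relative to the submission is preserved.
        DAWN_TRY(device->ExecuteCommandContext(recordedCommands));

        // Flush only the pending commands, if any (what TickImpl() does each tick).
        DAWN_TRY(device->ExecuteCommandContext(nullptr));
        return {};
    }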
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
index 237c8b4b4e7..0a00da293b4 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
@@ -19,9 +19,9 @@
#include "common/SerialQueue.h"
#include "dawn_native/Device.h"
-#include "dawn_native/d3d12/CommittedResourceAllocatorD3D12.h"
+#include "dawn_native/d3d12/CommandRecordingContext.h"
#include "dawn_native/d3d12/Forward.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
+#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
#include <memory>
@@ -32,8 +32,13 @@ namespace dawn_native { namespace d3d12 {
class MapRequestTracker;
class PlatformFunctions;
class ResourceAllocator;
+ class ResourceAllocatorManager;
- void ASSERT_SUCCESS(HRESULT hr);
+#define ASSERT_SUCCESS(hr) \
+ { \
+ HRESULT succeeded = hr; \
+ ASSERT(SUCCEEDED(succeeded)); \
+ }
// Definition of backend types
class Device : public DeviceBase {
@@ -48,7 +53,7 @@ namespace dawn_native { namespace d3d12 {
Serial GetCompletedCommandSerial() const final override;
Serial GetLastSubmittedCommandSerial() const final override;
- void TickImpl() override;
+ MaybeError TickImpl() override;
ComPtr<ID3D12Device> GetD3D12Device() const;
ComPtr<ID3D12CommandQueue> GetCommandQueue() const;
@@ -60,20 +65,20 @@ namespace dawn_native { namespace d3d12 {
DescriptorHeapAllocator* GetDescriptorHeapAllocator() const;
MapRequestTracker* GetMapRequestTracker() const;
ResourceAllocator* GetResourceAllocator() const;
+ CommandAllocatorManager* GetCommandAllocatorManager() const;
const PlatformFunctions* GetFunctions() const;
ComPtr<IDXGIFactory4> GetFactory() const;
- void OpenCommandList(ComPtr<ID3D12GraphicsCommandList>* commandList);
- ComPtr<ID3D12GraphicsCommandList> GetPendingCommandList();
+ ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
Serial GetPendingCommandSerial() const override;
- void NextSerial();
- void WaitForSerial(Serial serial);
+ MaybeError NextSerial();
+ MaybeError WaitForSerial(Serial serial);
void ReferenceUntilUnused(ComPtr<IUnknown> object);
- void ExecuteCommandLists(std::initializer_list<ID3D12CommandList*> commandLists);
+ MaybeError ExecuteCommandContext(CommandRecordingContext* commandContext);
ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
@@ -82,13 +87,13 @@ namespace dawn_native { namespace d3d12 {
uint64_t destinationOffset,
uint64_t size) override;
- ResultOrError<ResourceMemoryAllocation> AllocateMemory(
+ ResultOrError<ResourceHeapAllocation> AllocateMemory(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
D3D12_RESOURCE_STATES initialUsage,
D3D12_HEAP_FLAGS heapFlags);
- void DeallocateMemory(ResourceMemoryAllocation& allocation);
+ void DeallocateMemory(ResourceHeapAllocation& allocation);
TextureBase* WrapSharedHandle(const TextureDescriptor* descriptor, HANDLE sharedHandle);
@@ -115,8 +120,6 @@ namespace dawn_native { namespace d3d12 {
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
- size_t GetD3D12HeapTypeToIndex(D3D12_HEAP_TYPE heapType) const;
-
Serial mCompletedSerial = 0;
Serial mLastSubmittedSerial = 0;
ComPtr<ID3D12Fence> mFence;
@@ -129,10 +132,7 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectSignature;
- struct PendingCommandList {
- ComPtr<ID3D12GraphicsCommandList> commandList;
- bool open = false;
- } mPendingCommands;
+ CommandRecordingContext mPendingCommands;
SerialQueue<ComPtr<IUnknown>> mUsedComObjectRefs;
@@ -140,20 +140,7 @@ namespace dawn_native { namespace d3d12 {
std::unique_ptr<DescriptorHeapAllocator> mDescriptorHeapAllocator;
std::unique_ptr<MapRequestTracker> mMapRequestTracker;
std::unique_ptr<ResourceAllocator> mResourceAllocator;
-
- static constexpr uint32_t kNumHeapTypes = 4u; // Number of D3D12_HEAP_TYPE
-
- static_assert(D3D12_HEAP_TYPE_READBACK <= kNumHeapTypes,
- "Readback heap type enum exceeds max heap types");
- static_assert(D3D12_HEAP_TYPE_UPLOAD <= kNumHeapTypes,
- "Upload heap type enum exceeds max heap types");
- static_assert(D3D12_HEAP_TYPE_DEFAULT <= kNumHeapTypes,
- "Default heap type enum exceeds max heap types");
- static_assert(D3D12_HEAP_TYPE_CUSTOM <= kNumHeapTypes,
- "Custom heap type enum exceeds max heap types");
-
- std::array<std::unique_ptr<CommittedResourceAllocator>, kNumHeapTypes>
- mDirectResourceAllocators;
+ std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
dawn_native::PCIInfo mPCIInfo;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h b/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
index f42f82430f4..ade12e3ac86 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
@@ -29,7 +29,6 @@ namespace dawn_native { namespace d3d12 {
class PipelineLayout;
class Queue;
class RenderPipeline;
- class ResourceHeap;
class Sampler;
class ShaderModule;
class StagingBuffer;
@@ -48,7 +47,6 @@ namespace dawn_native { namespace d3d12 {
using PipelineLayoutType = PipelineLayout;
using QueueType = Queue;
using RenderPipelineType = RenderPipeline;
- using ResourceHeapType = ResourceHeap;
using SamplerType = Sampler;
using ShaderModuleType = ShaderModule;
using StagingBufferType = StagingBuffer;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp
index 2ec24b59ab6..1210bb8d606 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp
@@ -95,7 +95,7 @@ namespace dawn_native { namespace d3d12 {
// TODO(cwallez@chromium.org) Currently we force the CPU to wait for the GPU to be finished
// with the buffer. Ideally the synchronization should be all done on the GPU.
- mDevice->WaitForSerial(mBufferSerials[mCurrentBuffer]);
+ ASSERT(mDevice->WaitForSerial(mBufferSerials[mCurrentBuffer]).IsSuccess());
return DAWN_SWAP_CHAIN_NO_ERROR;
}
@@ -105,7 +105,7 @@ namespace dawn_native { namespace d3d12 {
ASSERT_SUCCESS(mSwapChain->Present(1, 0));
// TODO(cwallez@chromium.org): Make the serial ticking implicit.
- mDevice->NextSerial();
+ ASSERT(mDevice->NextSerial().IsSuccess());
mBufferSerials[mCurrentBuffer] = mDevice->GetPendingCommandSerial();
return DAWN_SWAP_CHAIN_NO_ERROR;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
index 08b0e257c81..6c38e68c5d6 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "common/BitSetIterator.h"
#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
@@ -54,8 +55,17 @@ namespace dawn_native { namespace d3d12 {
}
} // anonymous namespace
- PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
- : PipelineLayoutBase(device, descriptor) {
+ ResultOrError<PipelineLayout*> PipelineLayout::Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor) {
+ std::unique_ptr<PipelineLayout> layout =
+ std::make_unique<PipelineLayout>(device, descriptor);
+ DAWN_TRY(layout->Initialize());
+ return layout.release();
+ }
+
+ MaybeError PipelineLayout::Initialize() {
+ Device* device = ToBackend(GetDevice());
D3D12_ROOT_PARAMETER rootParameters[kMaxBindGroups * 2 + kMaxDynamicBufferCount];
// A root parameter is one of these types
@@ -117,7 +127,7 @@ namespace dawn_native { namespace d3d12 {
const auto& shaderRegisters = bindGroupLayout->GetBindingOffsets();
// Init root descriptors in root signatures.
- for (uint32_t dynamicBinding : IterateBitSet(groupInfo.dynamic)) {
+ for (uint32_t dynamicBinding : IterateBitSet(groupInfo.hasDynamicOffset)) {
D3D12_ROOT_PARAMETER* rootParameter = &rootParameters[parameterIndex];
// Setup root descriptor.
@@ -148,11 +158,15 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3DBlob> signature;
ComPtr<ID3DBlob> error;
- ASSERT_SUCCESS(device->GetFunctions()->d3d12SerializeRootSignature(
- &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error));
- ASSERT_SUCCESS(device->GetD3D12Device()->CreateRootSignature(
- 0, signature->GetBufferPointer(), signature->GetBufferSize(),
- IID_PPV_ARGS(&mRootSignature)));
+ DAWN_TRY(CheckHRESULT(
+ device->GetFunctions()->d3d12SerializeRootSignature(
+ &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error),
+ "D3D12 serialize root signature"));
+ DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateRootSignature(
+ 0, signature->GetBufferPointer(), signature->GetBufferSize(),
+ IID_PPV_ARGS(&mRootSignature)),
+ "D3D12 create root signature"));
+ return {};
}
uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(uint32_t group) const {
@@ -172,7 +186,7 @@ namespace dawn_native { namespace d3d12 {
uint32_t PipelineLayout::GetDynamicRootParameterIndex(uint32_t group, uint32_t binding) const {
ASSERT(group < kMaxBindGroups);
ASSERT(binding < kMaxBindingsPerGroup);
- ASSERT(GetBindGroupLayout(group)->GetBindingInfo().dynamic[binding]);
+ ASSERT(GetBindGroupLayout(group)->GetBindingInfo().hasDynamicOffset[binding]);
return mDynamicRootParameterIndices[group][binding];
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
index b2ee9e6bbdb..1d6c7e56b9d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
@@ -25,7 +25,8 @@ namespace dawn_native { namespace d3d12 {
class PipelineLayout : public PipelineLayoutBase {
public:
- PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+ static ResultOrError<PipelineLayout*> Create(Device* device,
+ const PipelineLayoutDescriptor* descriptor);
uint32_t GetCbvUavSrvRootParameterIndex(uint32_t group) const;
uint32_t GetSamplerRootParameterIndex(uint32_t group) const;
@@ -36,6 +37,8 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12RootSignature> GetRootSignature() const;
private:
+ using PipelineLayoutBase::PipelineLayoutBase;
+ MaybeError Initialize();
std::array<uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
std::array<uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
std::array<std::array<uint32_t, kMaxBindingsPerGroup>, kMaxBindGroups>
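PipelineLayout here, like RenderPipeline and Texture elsewhere in this patch, moves fallible work out of the constructor into a static Create() plus a private Initialize(). A hedged sketch of the pattern in isolation (WidgetSketch, its base class and descriptor type are placeholders, not Dawn types):

    // Hedged sketch: the Create()/Initialize() two-phase construction pattern.
    class WidgetSketch : public WidgetSketchBase {
      public:
        static ResultOrError<WidgetSketch*> Create(Device* device,
                                                   const WidgetDescriptor* descriptor) {
            std::unique_ptr<WidgetSketch> widget =
                std::make_unique<WidgetSketch>(device, descriptor);
            // Any HRESULT failure surfaces here as a MaybeError instead of inside a
            // constructor that cannot report it.
            DAWN_TRY(widget->Initialize());
            return widget.release();
        }

      private:
        // Inherited constructors keep the access of the base constructor, so
        // std::make_unique can still reach it even though the using-declaration
        // sits in a private section.
        using WidgetSketchBase::WidgetSketchBase;

        MaybeError Initialize() {
            // Create the underlying D3D12 objects here, e.g. via
            // DAWN_TRY(CheckHRESULT(..., "context string"));
            return {};
        }
    };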
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
index 063942d8c6d..8c50bd78b9a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/d3d12/QueueD3D12.h"
#include "dawn_native/d3d12/CommandBufferD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -22,20 +23,21 @@ namespace dawn_native { namespace d3d12 {
Queue::Queue(Device* device) : QueueBase(device) {
}
- void Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
Device* device = ToBackend(GetDevice());
device->Tick();
- device->OpenCommandList(&mCommandList);
+ DAWN_TRY(mCommandContext.Open(device->GetD3D12Device().Get(),
+ device->GetCommandAllocatorManager()));
for (uint32_t i = 0; i < commandCount; ++i) {
- ToBackend(commands[i])->RecordCommands(mCommandList, i);
+ DAWN_TRY(ToBackend(commands[i])->RecordCommands(&mCommandContext, i));
}
- ASSERT_SUCCESS(mCommandList->Close());
- device->ExecuteCommandLists({mCommandList.Get()});
+ DAWN_TRY(device->ExecuteCommandContext(&mCommandContext));
- device->NextSerial();
+ DAWN_TRY(device->NextSerial());
+ return {};
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
index 117d6eeb972..121d19c6b75 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
@@ -17,6 +17,7 @@
#include "dawn_native/Queue.h"
+#include "dawn_native/d3d12/CommandRecordingContext.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
@@ -29,9 +30,9 @@ namespace dawn_native { namespace d3d12 {
Queue(Device* device);
private:
- void SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- ComPtr<ID3D12GraphicsCommandList> mCommandList;
+ CommandRecordingContext mCommandContext;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
index 92c286535af..82fd037d056 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
#include "common/Assert.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
@@ -288,9 +289,17 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
- RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
- : RenderPipelineBase(device, descriptor),
- mD3d12PrimitiveTopology(D3D12PrimitiveTopology(GetPrimitiveTopology())) {
+ ResultOrError<RenderPipeline*> RenderPipeline::Create(
+ Device* device,
+ const RenderPipelineDescriptor* descriptor) {
+ std::unique_ptr<RenderPipeline> pipeline =
+ std::make_unique<RenderPipeline>(device, descriptor);
+ DAWN_TRY(pipeline->Initialize(descriptor));
+ return pipeline.release();
+ }
+
+ MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor* descriptor) {
+ Device* device = ToBackend(GetDevice());
uint32_t compileFlags = 0;
#if defined(_DEBUG)
// Enable better shader debugging with the graphics debugging tools.
@@ -391,8 +400,12 @@ namespace dawn_native { namespace d3d12 {
descriptorD3D12.SampleDesc.Count = GetSampleCount();
descriptorD3D12.SampleDesc.Quality = 0;
- ASSERT_SUCCESS(device->GetD3D12Device()->CreateGraphicsPipelineState(
- &descriptorD3D12, IID_PPV_ARGS(&mPipelineState)));
+ mD3d12PrimitiveTopology = D3D12PrimitiveTopology(GetPrimitiveTopology());
+
+ DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateGraphicsPipelineState(
+ &descriptorD3D12, IID_PPV_ARGS(&mPipelineState)),
+ "D3D12 create graphics pipeline state"));
+ return {};
}
RenderPipeline::~RenderPipeline() {
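Editor's note: CreateGraphicsPipelineState is now checked through CheckHRESULT with a context string ("D3D12 create graphics pipeline state") instead of ASSERT_SUCCESS. A small stand-in sketch of that HRESULT-to-error conversion; no Windows headers are used, and a plain signed integer models the convention that negative HRESULTs indicate failure.

#include <string>

using Hresult = long;  // stand-in; real code uses HRESULT from the Windows SDK

struct PipelineError {
    bool failed = false;
    std::string message;
};

PipelineError CheckResult(Hresult hr, const char* context) {
    if (hr < 0) {
        // Attach the numeric code and a human-readable context, in the spirit of the
        // real helper's "D3D12 create graphics pipeline state" message.
        return {true, std::string(context) + " failed with HRESULT " + std::to_string(hr)};
    }
    return {};
}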
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
index b9c9029eef0..affd5fe7336 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
@@ -25,13 +25,16 @@ namespace dawn_native { namespace d3d12 {
class RenderPipeline : public RenderPipelineBase {
public:
- RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
+ static ResultOrError<RenderPipeline*> Create(Device* device,
+ const RenderPipelineDescriptor* descriptor);
~RenderPipeline();
D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
ComPtr<ID3D12PipelineState> GetPipelineState();
private:
+ using RenderPipelineBase::RenderPipelineBase;
+ MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
new file mode 100644
index 00000000000..b18c998fb83
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
@@ -0,0 +1,68 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
+#include "dawn_native/d3d12/Forward.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
+ }
+
+ ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::AllocateMemory(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage,
+ D3D12_HEAP_FLAGS heapFlags) {
+ const size_t heapTypeIndex = GetD3D12HeapTypeToIndex(heapType);
+ ASSERT(heapTypeIndex < kNumHeapTypes);
+
+ // Get the direct allocator using a tightly sized heap (aka CreateCommittedResource).
+ CommittedResourceAllocator* allocator = mDirectResourceAllocators[heapTypeIndex].get();
+ if (allocator == nullptr) {
+ mDirectResourceAllocators[heapTypeIndex] =
+ std::make_unique<CommittedResourceAllocator>(mDevice, heapType);
+ allocator = mDirectResourceAllocators[heapTypeIndex].get();
+ }
+
+ ResourceHeapAllocation allocation;
+ DAWN_TRY_ASSIGN(allocation,
+ allocator->Allocate(resourceDescriptor, initialUsage, heapFlags));
+
+ return allocation;
+ }
+
+ size_t ResourceAllocatorManager::GetD3D12HeapTypeToIndex(D3D12_HEAP_TYPE heapType) const {
+ ASSERT(heapType > 0);
+ ASSERT(static_cast<uint32_t>(heapType) <= kNumHeapTypes);
+ return heapType - 1;
+ }
+
+ void ResourceAllocatorManager::DeallocateMemory(ResourceHeapAllocation& allocation) {
+ if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+ return;
+ }
+ CommittedResourceAllocator* allocator = nullptr;
+ D3D12_HEAP_PROPERTIES heapProp;
+ allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
+ const size_t heapTypeIndex = GetD3D12HeapTypeToIndex(heapProp.Type);
+ ASSERT(heapTypeIndex < kNumHeapTypes);
+ allocator = mDirectResourceAllocators[heapTypeIndex].get();
+ allocator->Deallocate(allocation);
+
+ // Invalidate the underlying resource heap in case the client accidentally
+ // calls DeallocateMemory again using the same allocation.
+ allocation.Invalidate();
+ }
+}} // namespace dawn_native::d3d12
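Editor's note: ResourceAllocatorManager keeps one committed-resource allocator per D3D12 heap type and creates each lazily on first use; GetD3D12HeapTypeToIndex subtracts one because the D3D12_HEAP_TYPE values start at 1 (DEFAULT=1, UPLOAD=2, READBACK=3, CUSTOM=4). A condensed sketch of that lookup-and-create logic with stand-in types:

#include <array>
#include <cstddef>
#include <memory>

// Stand-in mirroring the 1-based D3D12 heap type enum.
enum HeapType : int { Default = 1, Upload = 2, Readback = 3, Custom = 4 };
constexpr size_t kNumHeapTypes = 4;

struct Allocator {
    explicit Allocator(HeapType type) : heapType(type) {}
    HeapType heapType;
};

class AllocatorManagerSketch {
  public:
    // One allocator per heap type, created lazily the first time that heap type is requested.
    Allocator* GetOrCreate(HeapType type) {
        size_t index = static_cast<size_t>(type) - 1;  // 1-based enum -> 0-based array index
        if (mAllocators[index] == nullptr) {
            mAllocators[index] = std::make_unique<Allocator>(type);
        }
        return mAllocators[index].get();
    }

  private:
    std::array<std::unique_ptr<Allocator>, kNumHeapTypes> mAllocators;
};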
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
new file mode 100644
index 00000000000..d8f1cdb30ee
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
@@ -0,0 +1,62 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
+#define DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
+
+#include "dawn_native/d3d12/CommittedResourceAllocatorD3D12.h"
+
+#include <array>
+
+namespace dawn_native { namespace d3d12 {
+
+ class Device;
+
+ // Manages a list of resource allocators used by the device to create resources using multiple
+ // allocation methods.
+ class ResourceAllocatorManager {
+ public:
+ ResourceAllocatorManager(Device* device);
+
+ ResultOrError<ResourceHeapAllocation> AllocateMemory(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage,
+ D3D12_HEAP_FLAGS heapFlags);
+
+ void DeallocateMemory(ResourceHeapAllocation& allocation);
+
+ private:
+ size_t GetD3D12HeapTypeToIndex(D3D12_HEAP_TYPE heapType) const;
+
+ Device* mDevice;
+
+ static constexpr uint32_t kNumHeapTypes = 4u; // Number of D3D12_HEAP_TYPE
+
+ static_assert(D3D12_HEAP_TYPE_READBACK <= kNumHeapTypes,
+ "Readback heap type enum exceeds max heap types");
+ static_assert(D3D12_HEAP_TYPE_UPLOAD <= kNumHeapTypes,
+ "Upload heap type enum exceeds max heap types");
+ static_assert(D3D12_HEAP_TYPE_DEFAULT <= kNumHeapTypes,
+ "Default heap type enum exceeds max heap types");
+ static_assert(D3D12_HEAP_TYPE_CUSTOM <= kNumHeapTypes,
+ "Custom heap type enum exceeds max heap types");
+
+ std::array<std::unique_ptr<CommittedResourceAllocator>, kNumHeapTypes>
+ mDirectResourceAllocators;
+ };
+
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
new file mode 100644
index 00000000000..158e8900d19
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
@@ -0,0 +1,33 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
+
+#include <utility>
+
+namespace dawn_native { namespace d3d12 {
+ ResourceHeapAllocation::ResourceHeapAllocation(const AllocationInfo& info,
+ uint64_t offset,
+ ComPtr<ID3D12Resource> resource)
+ : ResourceMemoryAllocation(info, offset, nullptr), mResource(std::move(resource)) {
+ }
+
+ ComPtr<ID3D12Resource> ResourceHeapAllocation::GetD3D12Resource() const {
+ return mResource;
+ }
+
+ D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
+ return mResource->GetGPUVirtualAddress();
+ }
+}} // namespace dawn_native::d3d12
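Editor's note: ResourceHeapAllocation takes the ComPtr<ID3D12Resource> by value and std::moves it into the member, the usual idiom for ref-counted handles: callers can hand off ownership without extra AddRef/Release traffic, or keep their own reference by passing a copy. A generic sketch of that shape, with std::shared_ptr used purely as a stand-in for ComPtr:

#include <cstdint>
#include <memory>
#include <utility>

// std::shared_ptr models the reference counting of ComPtr<ID3D12Resource> here.
struct FakeResource {
    uint64_t gpuVirtualAddress = 0x1000;
};

class HeapAllocationSketch {
  public:
    HeapAllocationSketch() = default;
    // Take the ref-counted handle by value and move it into the member.
    HeapAllocationSketch(uint64_t offset, std::shared_ptr<FakeResource> resource)
        : mOffset(offset), mResource(std::move(resource)) {}

    uint64_t GetOffset() const { return mOffset; }
    uint64_t GetGPUPointer() const { return mResource->gpuVirtualAddress; }

  private:
    uint64_t mOffset = 0;
    std::shared_ptr<FakeResource> mResource;
};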
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
index 18b342a691d..8230857dbfb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
@@ -12,20 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef DAWNNATIVE_D3D12_RESOURCEHEAPD3D12_H_
-#define DAWNNATIVE_D3D12_RESOURCEHEAPD3D12_H_
+#ifndef DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
+#define DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
-#include "dawn_native/ResourceHeap.h"
+#include "dawn_native/ResourceMemoryAllocation.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
- // Wrapper for physical memory used with or without a resource object.
- class ResourceHeap : public ResourceHeapBase {
+ class ResourceHeapAllocation : public ResourceMemoryAllocation {
public:
- ResourceHeap(ComPtr<ID3D12Resource> resource);
-
- ~ResourceHeap() = default;
+ ResourceHeapAllocation() = default;
+ ResourceHeapAllocation(const AllocationInfo& info,
+ uint64_t offset,
+ ComPtr<ID3D12Resource> resource);
+ ~ResourceHeapAllocation() = default;
ComPtr<ID3D12Resource> GetD3D12Resource() const;
D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
@@ -35,4 +36,4 @@ namespace dawn_native { namespace d3d12 {
};
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_RESOURCEHEAPD3D12_H_
\ No newline at end of file
+#endif // DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
index 4698459b3a4..872e3971ecb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
@@ -37,7 +37,6 @@ namespace dawn_native { namespace d3d12 {
// If these options are changed, the values in DawnSPIRVCrossHLSLFastFuzzer.cpp need to be
// updated.
spirv_cross::CompilerGLSL::Options options_glsl;
- options_glsl.vertex.flip_vert_y = true;
compiler.set_common_options(options_glsl);
spirv_cross::CompilerHLSL::Options options_hlsl;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
index cab3a18413e..9e6c2bd7c6c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
@@ -14,7 +14,6 @@
#include "dawn_native/d3d12/StagingBufferD3D12.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/ResourceHeapD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -56,7 +55,7 @@ namespace dawn_native { namespace d3d12 {
}
ID3D12Resource* StagingBuffer::GetResource() const {
- return ToBackend(mUploadHeap.GetResourceHeap())->GetD3D12Resource().Get();
+ return mUploadHeap.GetD3D12Resource().Get();
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h
index 633be53c32f..ebba0c67185 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h
@@ -15,8 +15,8 @@
#ifndef DAWNNATIVE_STAGINGBUFFERD3D12_H_
#define DAWNNATIVE_STAGINGBUFFERD3D12_H_
-#include "dawn_native/ResourceMemoryAllocation.h"
#include "dawn_native/StagingBuffer.h"
+#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
@@ -34,7 +34,7 @@ namespace dawn_native { namespace d3d12 {
private:
Device* mDevice;
- ResourceMemoryAllocation mUploadHeap;
+ ResourceHeapAllocation mUploadHeap;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
index 0dffc29481f..7d24b35b551 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
@@ -48,13 +48,18 @@ namespace dawn_native { namespace d3d12 {
return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture);
}
- void SwapChain::OnBeforePresent(TextureBase* texture) {
+ MaybeError SwapChain::OnBeforePresent(TextureBase* texture) {
Device* device = ToBackend(GetDevice());
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
// Perform the necessary transition for the texture to be presented.
- ToBackend(texture)->TransitionUsageNow(device->GetPendingCommandList(), mTextureUsage);
+ ToBackend(texture)->TransitionUsageNow(commandContext, mTextureUsage);
+
+ DAWN_TRY(device->ExecuteCommandContext(nullptr));
- device->ExecuteCommandLists({});
+ return {};
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
index 833ba48b81c..151994c9b3e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
@@ -28,7 +28,7 @@ namespace dawn_native { namespace d3d12 {
protected:
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- void OnBeforePresent(TextureBase* texture) override;
+ MaybeError OnBeforePresent(TextureBase* texture) override;
dawn::TextureUsage mTextureUsage;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
index dbc88642ab3..a26c94843a0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
@@ -270,8 +270,18 @@ namespace dawn_native { namespace d3d12 {
return {};
}
+ ResultOrError<TextureBase*> Texture::Create(Device* device,
+ const TextureDescriptor* descriptor) {
+ Ref<Texture> dawnTexture = AcquireRef(new Texture(device, descriptor));
+ DAWN_TRY(dawnTexture->InitializeAsInternalTexture());
+ return dawnTexture.Detach();
+ }
+
Texture::Texture(Device* device, const TextureDescriptor* descriptor)
: TextureBase(device, descriptor, TextureState::OwnedInternal) {
+ }
+
+ MaybeError Texture::InitializeAsInternalTexture() {
D3D12_RESOURCE_DESC resourceDescriptor;
resourceDescriptor.Dimension = D3D12TextureDimension(GetDimension());
resourceDescriptor.Alignment = 0;
@@ -283,7 +293,7 @@ namespace dawn_native { namespace d3d12 {
resourceDescriptor.DepthOrArraySize = GetDepthOrArraySize();
resourceDescriptor.MipLevels = static_cast<UINT16>(GetNumMipLevels());
resourceDescriptor.Format = D3D12TextureFormat(GetFormat().format);
- resourceDescriptor.SampleDesc.Count = descriptor->sampleCount;
+ resourceDescriptor.SampleDesc.Count = GetSampleCount();
// TODO(bryan.bernhart@intel.com): investigate how to specify standard MSAA sample pattern.
resourceDescriptor.SampleDesc.Quality = 0;
resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
@@ -295,19 +305,26 @@ namespace dawn_native { namespace d3d12 {
->Allocate(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
D3D12_RESOURCE_STATE_COMMON);
+ Device* device = ToBackend(GetDevice());
+
if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- device->ConsumedError(ClearTexture(device->GetPendingCommandList(), 0,
- GetNumMipLevels(), 0, GetArrayLayers(),
- TextureBase::ClearValue::NonZero));
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+ DAWN_TRY(ClearTexture(commandContext, 0, GetNumMipLevels(), 0, GetArrayLayers(),
+ TextureBase::ClearValue::NonZero));
}
+
+ return {};
}
// With this constructor, the lifetime of the ID3D12Resource is externally managed.
Texture::Texture(Device* device,
const TextureDescriptor* descriptor,
- ID3D12Resource* nativeTexture)
- : TextureBase(device, descriptor, TextureState::OwnedExternal), mResource(nativeTexture) {
- SetIsSubresourceContentInitialized(0, descriptor->mipLevelCount, 0,
+ ComPtr<ID3D12Resource> nativeTexture)
+ : TextureBase(device, descriptor, TextureState::OwnedExternal),
+ mResource(std::move(nativeTexture)) {
+ SetIsSubresourceContentInitialized(true, 0, descriptor->mipLevelCount, 0,
descriptor->arrayLayerCount);
}
@@ -341,16 +358,18 @@ namespace dawn_native { namespace d3d12 {
// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
// ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
// cause subsequent errors.
- bool Texture::TransitionUsageAndGetResourceBarrier(D3D12_RESOURCE_BARRIER* barrier,
+ bool Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
dawn::TextureUsage newUsage) {
- return TransitionUsageAndGetResourceBarrier(barrier,
+ return TransitionUsageAndGetResourceBarrier(commandContext, barrier,
D3D12TextureUsage(newUsage, GetFormat()));
}
// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
// ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
// cause subsequent errors.
- bool Texture::TransitionUsageAndGetResourceBarrier(D3D12_RESOURCE_BARRIER* barrier,
+ bool Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
D3D12_RESOURCE_STATES newState) {
// Avoid transitioning the texture when it isn't needed.
// TODO(cwallez@chromium.org): Need some form of UAV barriers at some point.
@@ -417,17 +436,17 @@ namespace dawn_native { namespace d3d12 {
return true;
}
- void Texture::TransitionUsageNow(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void Texture::TransitionUsageNow(CommandRecordingContext* commandContext,
dawn::TextureUsage usage) {
- TransitionUsageNow(commandList, D3D12TextureUsage(usage, GetFormat()));
+ TransitionUsageNow(commandContext, D3D12TextureUsage(usage, GetFormat()));
}
- void Texture::TransitionUsageNow(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void Texture::TransitionUsageNow(CommandRecordingContext* commandContext,
D3D12_RESOURCE_STATES newState) {
D3D12_RESOURCE_BARRIER barrier;
- if (TransitionUsageAndGetResourceBarrier(&barrier, newState)) {
- commandList->ResourceBarrier(1, &barrier);
+ if (TransitionUsageAndGetResourceBarrier(commandContext, &barrier, newState)) {
+ commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
}
}
@@ -475,7 +494,7 @@ namespace dawn_native { namespace d3d12 {
return dsvDesc;
}
- MaybeError Texture::ClearTexture(ComPtr<ID3D12GraphicsCommandList> commandList,
+ MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
@@ -483,19 +502,22 @@ namespace dawn_native { namespace d3d12 {
TextureBase::ClearValue clearValue) {
// TODO(jiawei.shao@intel.com): initialize the textures in compressed formats with copies.
if (GetFormat().isCompressed) {
- SetIsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
+ SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
layerCount);
return {};
}
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
Device* device = ToBackend(GetDevice());
DescriptorHeapAllocator* descriptorHeapAllocator = device->GetDescriptorHeapAllocator();
uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
if (GetFormat().isRenderable) {
if (GetFormat().HasDepthOrStencil()) {
- TransitionUsageNow(commandList, D3D12_RESOURCE_STATE_DEPTH_WRITE);
- DescriptorHeapHandle dsvHeap =
- descriptorHeapAllocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_DSV, 1);
+ TransitionUsageNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE);
+ DescriptorHeapHandle dsvHeap;
+ DAWN_TRY_ASSIGN(dsvHeap, descriptorHeapAllocator->AllocateCPUHeap(
+ D3D12_DESCRIPTOR_HEAP_TYPE_DSV, 1));
D3D12_CPU_DESCRIPTOR_HANDLE dsvHandle = dsvHeap.GetCPUHandle(0);
D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc = GetDSVDescriptor(baseMipLevel);
device->GetD3D12Device()->CreateDepthStencilView(mResource.Get(), &dsvDesc,
@@ -512,11 +534,14 @@ namespace dawn_native { namespace d3d12 {
commandList->ClearDepthStencilView(dsvHandle, clearFlags, clearColor, clearColor, 0,
nullptr);
} else {
- TransitionUsageNow(commandList, D3D12_RESOURCE_STATE_RENDER_TARGET);
- DescriptorHeapHandle rtvHeap =
- descriptorHeapAllocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_RTV, 1);
+ TransitionUsageNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET);
+ DescriptorHeapHandle rtvHeap;
+ DAWN_TRY_ASSIGN(rtvHeap, descriptorHeapAllocator->AllocateCPUHeap(
+ D3D12_DESCRIPTOR_HEAP_TYPE_RTV, 1));
D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetCPUHandle(0);
- const float clearColorRGBA[4] = {clearColor, clearColor, clearColor, clearColor};
+ const float fClearColor = static_cast<float>(clearColor);
+ const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor,
+ fClearColor};
// TODO(natlee@microsoft.com): clear all array layers for 2D array textures
for (uint32_t i = baseMipLevel; i < baseMipLevel + levelCount; i++) {
@@ -538,47 +563,53 @@ namespace dawn_native { namespace d3d12 {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
}
uint32_t bufferSize = static_cast<uint32_t>(bufferSize64);
- DynamicUploader* uploader = nullptr;
- DAWN_TRY_ASSIGN(uploader, device->GetDynamicUploader());
+ DynamicUploader* uploader = device->GetDynamicUploader();
UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, uploader->Allocate(bufferSize));
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(bufferSize, device->GetPendingCommandSerial()));
std::fill(reinterpret_cast<uint32_t*>(uploadHandle.mappedBuffer),
reinterpret_cast<uint32_t*>(uploadHandle.mappedBuffer + bufferSize),
clearColor);
- TransitionUsageNow(commandList, D3D12_RESOURCE_STATE_COPY_DEST);
+ TransitionUsageNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST);
// compute d3d12 texture copy locations for texture and buffer
Extent3D copySize = {GetSize().width, GetSize().height, 1};
TextureCopySplit copySplit = ComputeTextureCopySplit(
{0, 0, 0}, copySize, GetFormat(), uploadHandle.startOffset, rowPitch, 0);
- D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(this, baseMipLevel, baseArrayLayer);
- for (uint32_t i = 0; i < copySplit.count; ++i) {
- TextureCopySplit::CopyInfo& info = copySplit.copies[i];
-
- D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(
- this, ToBackend(uploadHandle.stagingBuffer)->GetResource(), info.bufferSize,
- copySplit.offset, rowPitch);
- D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
-
- // copy the buffer filled with clear color to the texture
- commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
- info.textureOffset.y, info.textureOffset.z,
- &bufferLocation, &sourceRegion);
+
+ for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
+ ++layer) {
+ D3D12_TEXTURE_COPY_LOCATION textureLocation =
+ ComputeTextureCopyLocationForTexture(this, level, layer);
+ for (uint32_t i = 0; i < copySplit.count; ++i) {
+ TextureCopySplit::CopyInfo& info = copySplit.copies[i];
+
+ D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+ ComputeBufferLocationForCopyTextureRegion(
+ this, ToBackend(uploadHandle.stagingBuffer)->GetResource(),
+ info.bufferSize, copySplit.offset, rowPitch);
+ D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
+
+ // copy the buffer filled with clear color to the texture
+ commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
+ info.textureOffset.y, info.textureOffset.z,
+ &bufferLocation, &sourceRegion);
+ }
+ }
}
}
if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
+ SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
layerCount);
GetDevice()->IncrementLazyClearCountForTesting();
}
return {};
}
- void Texture::EnsureSubresourceContentInitialized(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
@@ -590,7 +621,7 @@ namespace dawn_native { namespace d3d12 {
layerCount)) {
// If subresource has not been initialized, clear it to black as it could contain
// dirty bits from recycled memory
- GetDevice()->ConsumedError(ClearTexture(commandList, baseMipLevel, levelCount,
+ GetDevice()->ConsumedError(ClearTexture(commandContext, baseMipLevel, levelCount,
baseArrayLayer, layerCount,
TextureBase::ClearValue::Zero));
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
index af32dd3cf84..162b50ee2f2 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
@@ -22,6 +22,7 @@
namespace dawn_native { namespace d3d12 {
+ class CommandRecordingContext;
class Device;
DXGI_FORMAT D3D12TextureFormat(dawn::TextureFormat format);
@@ -31,33 +32,39 @@ namespace dawn_native { namespace d3d12 {
class Texture : public TextureBase {
public:
- Texture(Device* device, const TextureDescriptor* descriptor);
- Texture(Device* device, const TextureDescriptor* descriptor, ID3D12Resource* nativeTexture);
+ static ResultOrError<TextureBase*> Create(Device* device,
+ const TextureDescriptor* descriptor);
+ Texture(Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> nativeTexture);
~Texture();
DXGI_FORMAT GetD3D12Format() const;
ID3D12Resource* GetD3D12Resource() const;
- bool TransitionUsageAndGetResourceBarrier(D3D12_RESOURCE_BARRIER* barrier,
+ bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
dawn::TextureUsage newUsage);
- void TransitionUsageNow(ComPtr<ID3D12GraphicsCommandList> commandList,
- dawn::TextureUsage usage);
- void TransitionUsageNow(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void TransitionUsageNow(CommandRecordingContext* commandContext, dawn::TextureUsage usage);
+ void TransitionUsageNow(CommandRecordingContext* commandContext,
D3D12_RESOURCE_STATES newState);
D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(uint32_t baseMipLevel,
uint32_t baseArrayLayer,
uint32_t layerCount) const;
D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t baseMipLevel) const;
- void EnsureSubresourceContentInitialized(ComPtr<ID3D12GraphicsCommandList> commandList,
+ void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
uint32_t layerCount);
private:
+ Texture(Device* device, const TextureDescriptor* descriptor);
+ MaybeError InitializeAsInternalTexture();
+
// Dawn API
void DestroyImpl() override;
- MaybeError ClearTexture(ComPtr<ID3D12GraphicsCommandList> commandList,
+ MaybeError ClearTexture(CommandRecordingContext* commandContext,
uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
@@ -66,7 +73,8 @@ namespace dawn_native { namespace d3d12 {
UINT16 GetDepthOrArraySize();
- bool TransitionUsageAndGetResourceBarrier(D3D12_RESOURCE_BARRIER* barrier,
+ bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
D3D12_RESOURCE_STATES newState);
ComPtr<ID3D12Resource> mResource;
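Editor's note: TransitionUsageAndGetResourceBarrier only produces a barrier when the tracked resource state actually changes, and TransitionUsageNow records it only in that case. A toy state tracker showing the same skip-redundant-transition idea (much simplified; the real code also converts dawn::TextureUsage to D3D12 states and tracks more context):

#include <cstdint>
#include <vector>

enum class State : uint32_t { Common, CopyDest, RenderTarget, DepthWrite, Present };

struct Barrier {
    State before;
    State after;
};

class TrackedResource {
  public:
    // Fills *outBarrier and returns true only when a transition is actually needed.
    bool GetBarrierIfNeeded(State newState, Barrier* outBarrier) {
        if (newState == mState) {
            return false;  // avoid redundant transitions
        }
        *outBarrier = {mState, newState};
        mState = newState;
        return true;
    }

  private:
    State mState = State::Common;
};

// Usage mirroring TransitionUsageNow: only record the barrier when one was produced.
void TransitionNow(TrackedResource* resource, State newState, std::vector<Barrier>* commandList) {
    Barrier barrier;
    if (resource->GetBarrierIfNeeded(newState, &barrier)) {
        commandList->push_back(barrier);
    }
}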
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
index 80287ddbb34..646a387bace 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
@@ -15,15 +15,19 @@
#include "dawn_native/metal/BackendMTL.h"
#include "common/Constants.h"
+#include "common/Platform.h"
#include "dawn_native/Instance.h"
#include "dawn_native/MetalBackend.h"
#include "dawn_native/metal/DeviceMTL.h"
-#import <IOKit/IOKitLib.h>
+#if defined(DAWN_PLATFORM_MACOS)
+# import <IOKit/IOKitLib.h>
+#endif
namespace dawn_native { namespace metal {
namespace {
+
struct PCIIDs {
uint32_t vendorId;
uint32_t deviceId;
@@ -34,6 +38,7 @@ namespace dawn_native { namespace metal {
uint32_t vendorId;
};
+#if defined(DAWN_PLATFORM_MACOS)
const Vendor kVendors[] = {{"AMD", kVendorID_AMD},
{"Radeon", kVendorID_AMD},
{"Intel", kVendorID_Intel},
@@ -150,6 +155,19 @@ namespace dawn_native { namespace metal {
NSOperatingSystemVersion macOS10_11 = {10, 11, 0};
return [NSProcessInfo.processInfo isOperatingSystemAtLeastVersion:macOS10_11];
}
+#elif defined(DAWN_PLATFORM_IOS)
+ MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+ DAWN_UNUSED(device);
+ *ids = PCIIDs{0, 0};
+ return {};
+ }
+
+ bool IsMetalSupported() {
+ return true;
+ }
+#else
+# error "Unsupported Apple platform."
+#endif
} // anonymous namespace
// The Metal backend's Adapter.
@@ -166,11 +184,17 @@ namespace dawn_native { namespace metal {
mPCIInfo.deviceId = ids.deviceId;
};
+#if defined(DAWN_PLATFORM_IOS)
+ mDeviceType = DeviceType::IntegratedGPU;
+#elif defined(DAWN_PLATFORM_MACOS)
if ([device isLowPower]) {
mDeviceType = DeviceType::IntegratedGPU;
} else {
mDeviceType = DeviceType::DiscreteGPU;
}
+#else
+# error "Unsupported Apple platform."
+#endif
InitializeSupportedExtensions();
}
@@ -184,9 +208,11 @@ namespace dawn_native { namespace metal {
return {new Device(this, mDevice, descriptor)};
}
void InitializeSupportedExtensions() {
+#if defined(DAWN_PLATFORM_MACOS)
if ([mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
mSupportedExtensions.EnableExtension(Extension::TextureCompressionBC);
}
+#endif
}
id<MTLDevice> mDevice = nil;
@@ -201,14 +227,27 @@ namespace dawn_native { namespace metal {
}
std::vector<std::unique_ptr<AdapterBase>> Backend::DiscoverDefaultAdapters() {
- NSArray<id<MTLDevice>>* devices = MTLCopyAllDevices();
-
std::vector<std::unique_ptr<AdapterBase>> adapters;
- for (id<MTLDevice> device in devices) {
- adapters.push_back(std::make_unique<Adapter>(GetInstance(), device));
- }
- [devices release];
+ if (@available(macOS 10.11, *)) {
+#if defined(DAWN_PLATFORM_MACOS)
+ NSArray<id<MTLDevice>>* devices = MTLCopyAllDevices();
+
+ for (id<MTLDevice> device in devices) {
+ adapters.push_back(std::make_unique<Adapter>(GetInstance(), device));
+ }
+
+ [devices release];
+#endif
+ } else if (@available(iOS 8.0, *)) {
+#if defined(DAWN_PLATFORM_IOS)
+ // iOS only has a single device so MTLCopyAllDevices doesn't exist there.
+ adapters.push_back(
+ std::make_unique<Adapter>(GetInstance(), MTLCreateSystemDefaultDevice()));
+#endif
+ } else {
+ UNREACHABLE();
+ }
return adapters;
}
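Editor's note: the Metal backend now compiles separate adapter-discovery and PCI-info paths for macOS and iOS, with an #error fallback for any other Apple platform. A minimal C++ illustration of that guard-every-path approach; the PLATFORM_* macros below are made up for the example and are not Dawn's real ones.

#include <cstdio>

#define PLATFORM_DESKTOP 1  // pretend we are building the desktop variant

#if defined(PLATFORM_DESKTOP)
static const char* kAdapterDiscovery = "enumerate all adapters";
#elif defined(PLATFORM_MOBILE)
static const char* kAdapterDiscovery = "use the single default adapter";
#else
#    error "Unsupported platform."
#endif

int main() {
    // Exactly one branch is compiled in; an unexpected platform fails the build.
    std::printf("adapter discovery strategy: %s\n", kAdapterDiscovery);
    return 0;
}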
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
index dfe96f3e90a..076e2b8d69e 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
@@ -14,9 +14,13 @@
#include "dawn_native/metal/BufferMTL.h"
+#include "common/Math.h"
#include "dawn_native/metal/DeviceMTL.h"
namespace dawn_native { namespace metal {
+ // The sizes of uniform and storage buffers need to be aligned to 16 bytes, which is the
+ // largest alignment of the supported data types.
+ static constexpr uint32_t kMinUniformOrStorageBufferAlignment = 16u;
Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
: BufferBase(device, descriptor) {
@@ -27,7 +31,15 @@ namespace dawn_native { namespace metal {
storageMode = MTLResourceStorageModePrivate;
}
- mMtlBuffer = [device->GetMTLDevice() newBufferWithLength:GetSize() options:storageMode];
+ uint32_t currentSize = GetSize();
+        // The Metal validation layer requires uniform and storage buffers to be at least as
+        // large as the buffer block defined in the shader, and the overall size of the buffer
+        // must be aligned to the largest alignment of its members.
+ if (GetUsage() & (dawn::BufferUsage::Uniform | dawn::BufferUsage::Storage)) {
+ currentSize = Align(currentSize, kMinUniformOrStorageBufferAlignment);
+ }
+
+ mMtlBuffer = [device->GetMTLDevice() newBufferWithLength:currentSize options:storageMode];
}
Buffer::~Buffer() {
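Editor's note: the Metal buffer path rounds uniform/storage buffer sizes up with Align() to meet the 16-byte requirement described in the comment above. A common power-of-two round-up implementation is sketched below; the actual common/Math.h helper may differ in details.

#include <cassert>
#include <cstdint>

uint64_t AlignUp(uint64_t value, uint64_t alignment) {
    assert(alignment != 0 && (alignment & (alignment - 1)) == 0);  // power of two required
    return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
    // A 20-byte uniform buffer is padded to 32 bytes; already-aligned sizes are unchanged.
    return (AlignUp(20, 16) == 32 && AlignUp(16, 16) == 16) ? 0 : 1;
}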
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
index ef89ff2342a..db0794f4507 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
@@ -15,6 +15,7 @@
#include "dawn_native/metal/CommandBufferMTL.h"
#include "dawn_native/BindGroup.h"
+#include "dawn_native/BindGroupTracker.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
#include "dawn_native/RenderBundle.h"
@@ -206,7 +207,7 @@ namespace dawn_native { namespace metal {
// MSL code generated by SPIRV-Cross expects.
PerStage<std::array<uint32_t, kGenericMetalBufferSlots>> data;
- void Apply(RenderPipeline* pipeline, id<MTLRenderCommandEncoder> render) {
+ void Apply(id<MTLRenderCommandEncoder> render, RenderPipeline* pipeline) {
dawn::ShaderStage stagesToApply =
dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
@@ -234,7 +235,7 @@ namespace dawn_native { namespace metal {
dirtyStages ^= stagesToApply;
}
- void Apply(ComputePipeline* pipeline, id<MTLComputeCommandEncoder> compute) {
+ void Apply(id<MTLComputeCommandEncoder> compute, ComputePipeline* pipeline) {
if (!(dirtyStages & dawn::ShaderStage::Compute)) {
return;
}
@@ -253,128 +254,6 @@ namespace dawn_native { namespace metal {
}
};
- // Handles a call to SetBindGroup, directing the commands to the correct encoder.
- // There is a single function that takes both encoders to factor code. Other approaches like
- // templates wouldn't work because the name of methods are different between the two encoder
- // types.
- void ApplyBindGroup(uint32_t index,
- BindGroup* group,
- uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets,
- PipelineLayout* pipelineLayout,
- StorageBufferLengthTracker* lengthTracker,
- id<MTLRenderCommandEncoder> render,
- id<MTLComputeCommandEncoder> compute) {
- const auto& layout = group->GetLayout()->GetBindingInfo();
- uint32_t currentDynamicBufferIndex = 0;
-
- // TODO(kainino@chromium.org): Maintain buffers and offsets arrays in BindGroup
- // so that we only have to do one setVertexBuffers and one setFragmentBuffers
- // call here.
- for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
- auto stage = layout.visibilities[bindingIndex];
- bool hasVertStage = stage & dawn::ShaderStage::Vertex && render != nil;
- bool hasFragStage = stage & dawn::ShaderStage::Fragment && render != nil;
- bool hasComputeStage = stage & dawn::ShaderStage::Compute && compute != nil;
-
- uint32_t vertIndex = 0;
- uint32_t fragIndex = 0;
- uint32_t computeIndex = 0;
-
- if (hasVertStage) {
- vertIndex = pipelineLayout->GetBindingIndexInfo(
- SingleShaderStage::Vertex)[index][bindingIndex];
- }
- if (hasFragStage) {
- fragIndex = pipelineLayout->GetBindingIndexInfo(
- SingleShaderStage::Fragment)[index][bindingIndex];
- }
- if (hasComputeStage) {
- computeIndex = pipelineLayout->GetBindingIndexInfo(
- SingleShaderStage::Compute)[index][bindingIndex];
- }
-
- switch (layout.types[bindingIndex]) {
- case dawn::BindingType::UniformBuffer:
- case dawn::BindingType::StorageBuffer: {
- const BufferBinding& binding =
- group->GetBindingAsBufferBinding(bindingIndex);
- const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
- NSUInteger offset = binding.offset;
-
- // TODO(shaobo.yan@intel.com): Record bound buffer status to use
- // setBufferOffset to achieve better performance.
- if (layout.dynamic[bindingIndex]) {
- offset += dynamicOffsets[currentDynamicBufferIndex];
- currentDynamicBufferIndex++;
- }
-
- if (hasVertStage) {
- lengthTracker->data[SingleShaderStage::Vertex][vertIndex] =
- binding.size;
- lengthTracker->dirtyStages |= dawn::ShaderStage::Vertex;
- [render setVertexBuffers:&buffer
- offsets:&offset
- withRange:NSMakeRange(vertIndex, 1)];
- }
- if (hasFragStage) {
- lengthTracker->data[SingleShaderStage::Fragment][fragIndex] =
- binding.size;
- lengthTracker->dirtyStages |= dawn::ShaderStage::Fragment;
- [render setFragmentBuffers:&buffer
- offsets:&offset
- withRange:NSMakeRange(fragIndex, 1)];
- }
- if (hasComputeStage) {
- lengthTracker->data[SingleShaderStage::Compute][computeIndex] =
- binding.size;
- lengthTracker->dirtyStages |= dawn::ShaderStage::Compute;
- [compute setBuffers:&buffer
- offsets:&offset
- withRange:NSMakeRange(computeIndex, 1)];
- }
-
- } break;
-
- case dawn::BindingType::Sampler: {
- auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
- if (hasVertStage) {
- [render setVertexSamplerState:sampler->GetMTLSamplerState()
- atIndex:vertIndex];
- }
- if (hasFragStage) {
- [render setFragmentSamplerState:sampler->GetMTLSamplerState()
- atIndex:fragIndex];
- }
- if (hasComputeStage) {
- [compute setSamplerState:sampler->GetMTLSamplerState()
- atIndex:computeIndex];
- }
- } break;
-
- case dawn::BindingType::SampledTexture: {
- auto textureView = ToBackend(group->GetBindingAsTextureView(bindingIndex));
- if (hasVertStage) {
- [render setVertexTexture:textureView->GetMTLTexture()
- atIndex:vertIndex];
- }
- if (hasFragStage) {
- [render setFragmentTexture:textureView->GetMTLTexture()
- atIndex:fragIndex];
- }
- if (hasComputeStage) {
- [compute setTexture:textureView->GetMTLTexture() atIndex:computeIndex];
- }
- } break;
-
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
- UNREACHABLE();
- break;
- }
- }
- }
-
struct TextureBufferCopySplit {
static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
@@ -511,23 +390,173 @@ namespace dawn_native { namespace metal {
return copy;
}
+ // Keeps track of the dirty bind groups so they can be lazily applied when we know the
+ // pipeline state.
+ // Bind groups may be inherited because bind groups are packed in the buffer /
+ // texture tables in contiguous order.
+ class BindGroupTracker : public BindGroupTrackerBase<BindGroup*, true> {
+ public:
+ explicit BindGroupTracker(StorageBufferLengthTracker* lengthTracker)
+ : BindGroupTrackerBase(), mLengthTracker(lengthTracker) {
+ }
+
+ template <typename Encoder>
+ void Apply(Encoder encoder) {
+ for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ ApplyBindGroup(encoder, index, mBindGroups[index], mDynamicOffsetCounts[index],
+ mDynamicOffsets[index].data(), ToBackend(mPipelineLayout));
+ }
+ DidApply();
+ }
+
+ private:
+ // Handles a call to SetBindGroup, directing the commands to the correct encoder.
+ // There is a single function that takes both encoders to factor code. Other approaches
+            // like templates wouldn't work because the names of the methods are different
+            // between the two encoder types.
+ void ApplyBindGroupImpl(id<MTLRenderCommandEncoder> render,
+ id<MTLComputeCommandEncoder> compute,
+ uint32_t index,
+ BindGroup* group,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets,
+ PipelineLayout* pipelineLayout) {
+ const auto& layout = group->GetLayout()->GetBindingInfo();
+ uint32_t currentDynamicBufferIndex = 0;
+
+ // TODO(kainino@chromium.org): Maintain buffers and offsets arrays in BindGroup
+ // so that we only have to do one setVertexBuffers and one setFragmentBuffers
+ // call here.
+ for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
+ auto stage = layout.visibilities[bindingIndex];
+ bool hasVertStage = stage & dawn::ShaderStage::Vertex && render != nil;
+ bool hasFragStage = stage & dawn::ShaderStage::Fragment && render != nil;
+ bool hasComputeStage = stage & dawn::ShaderStage::Compute && compute != nil;
+
+ uint32_t vertIndex = 0;
+ uint32_t fragIndex = 0;
+ uint32_t computeIndex = 0;
+
+ if (hasVertStage) {
+ vertIndex = pipelineLayout->GetBindingIndexInfo(
+ SingleShaderStage::Vertex)[index][bindingIndex];
+ }
+ if (hasFragStage) {
+ fragIndex = pipelineLayout->GetBindingIndexInfo(
+ SingleShaderStage::Fragment)[index][bindingIndex];
+ }
+ if (hasComputeStage) {
+ computeIndex = pipelineLayout->GetBindingIndexInfo(
+ SingleShaderStage::Compute)[index][bindingIndex];
+ }
+
+ switch (layout.types[bindingIndex]) {
+ case dawn::BindingType::UniformBuffer:
+ case dawn::BindingType::StorageBuffer: {
+ const BufferBinding& binding =
+ group->GetBindingAsBufferBinding(bindingIndex);
+ const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
+ NSUInteger offset = binding.offset;
+
+ // TODO(shaobo.yan@intel.com): Record bound buffer status to use
+ // setBufferOffset to achieve better performance.
+ if (layout.hasDynamicOffset[bindingIndex]) {
+ offset += dynamicOffsets[currentDynamicBufferIndex];
+ currentDynamicBufferIndex++;
+ }
+
+ if (hasVertStage) {
+ mLengthTracker->data[SingleShaderStage::Vertex][vertIndex] =
+ binding.size;
+ mLengthTracker->dirtyStages |= dawn::ShaderStage::Vertex;
+ [render setVertexBuffers:&buffer
+ offsets:&offset
+ withRange:NSMakeRange(vertIndex, 1)];
+ }
+ if (hasFragStage) {
+ mLengthTracker->data[SingleShaderStage::Fragment][fragIndex] =
+ binding.size;
+ mLengthTracker->dirtyStages |= dawn::ShaderStage::Fragment;
+ [render setFragmentBuffers:&buffer
+ offsets:&offset
+ withRange:NSMakeRange(fragIndex, 1)];
+ }
+ if (hasComputeStage) {
+ mLengthTracker->data[SingleShaderStage::Compute][computeIndex] =
+ binding.size;
+ mLengthTracker->dirtyStages |= dawn::ShaderStage::Compute;
+ [compute setBuffers:&buffer
+ offsets:&offset
+ withRange:NSMakeRange(computeIndex, 1)];
+ }
+
+ } break;
+
+ case dawn::BindingType::Sampler: {
+ auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+ if (hasVertStage) {
+ [render setVertexSamplerState:sampler->GetMTLSamplerState()
+ atIndex:vertIndex];
+ }
+ if (hasFragStage) {
+ [render setFragmentSamplerState:sampler->GetMTLSamplerState()
+ atIndex:fragIndex];
+ }
+ if (hasComputeStage) {
+ [compute setSamplerState:sampler->GetMTLSamplerState()
+ atIndex:computeIndex];
+ }
+ } break;
+
+ case dawn::BindingType::SampledTexture: {
+ auto textureView =
+ ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ if (hasVertStage) {
+ [render setVertexTexture:textureView->GetMTLTexture()
+ atIndex:vertIndex];
+ }
+ if (hasFragStage) {
+ [render setFragmentTexture:textureView->GetMTLTexture()
+ atIndex:fragIndex];
+ }
+ if (hasComputeStage) {
+ [compute setTexture:textureView->GetMTLTexture()
+ atIndex:computeIndex];
+ }
+ } break;
+
+ case dawn::BindingType::StorageTexture:
+ case dawn::BindingType::ReadonlyStorageBuffer:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+
+ template <typename... Args>
+ void ApplyBindGroup(id<MTLRenderCommandEncoder> encoder, Args&&... args) {
+ ApplyBindGroupImpl(encoder, nil, std::forward<Args&&>(args)...);
+ }
+
+ template <typename... Args>
+ void ApplyBindGroup(id<MTLComputeCommandEncoder> encoder, Args&&... args) {
+ ApplyBindGroupImpl(nil, encoder, std::forward<Args&&>(args)...);
+ }
+
+ StorageBufferLengthTracker* mLengthTracker;
+ };
+
// Keeps track of the dirty vertex buffer values so they can be lazily applied when we know
// all the relevant state.
class VertexInputBufferTracker {
public:
- void OnSetVertexBuffers(uint32_t startSlot,
- uint32_t count,
- const Ref<BufferBase>* buffers,
- const uint64_t* offsets) {
- for (uint32_t i = 0; i < count; ++i) {
- uint32_t slot = startSlot + i;
- mVertexBuffers[slot] = ToBackend(buffers[i].Get())->GetMTLBuffer();
- mVertexBufferOffsets[slot] = offsets[i];
- }
+ void OnSetVertexBuffer(uint32_t slot, Buffer* buffer, uint64_t offset) {
+ mVertexBuffers[slot] = buffer->GetMTLBuffer();
+ mVertexBufferOffsets[slot] = offset;
// Use 64 bit masks and make sure there are no shift UB
static_assert(kMaxVertexBuffers <= 8 * sizeof(unsigned long long) - 1, "");
- mDirtyVertexBuffers |= ((1ull << count) - 1ull) << startSlot;
+ mDirtyVertexBuffers |= 1ull << slot;
}
void OnSetPipeline(RenderPipeline* lastPipeline, RenderPipeline* pipeline) {
@@ -685,6 +714,7 @@ namespace dawn_native { namespace metal {
void CommandBuffer::EncodeComputePass(id<MTLCommandBuffer> commandBuffer) {
ComputePipeline* lastPipeline = nullptr;
StorageBufferLengthTracker storageBufferLengths = {};
+ BindGroupTracker bindGroups(&storageBufferLengths);
// Will be autoreleased
id<MTLComputeCommandEncoder> encoder = [commandBuffer computeCommandEncoder];
@@ -700,7 +730,9 @@ namespace dawn_native { namespace metal {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- storageBufferLengths.Apply(lastPipeline, encoder);
+
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
[encoder dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
@@ -708,7 +740,9 @@ namespace dawn_native { namespace metal {
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- storageBufferLengths.Apply(lastPipeline, encoder);
+
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
@@ -722,6 +756,8 @@ namespace dawn_native { namespace metal {
SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
lastPipeline = ToBackend(cmd->pipeline).Get();
+ bindGroups.OnSetPipeline(lastPipeline);
+
lastPipeline->Encode(encoder);
} break;
@@ -732,9 +768,8 @@ namespace dawn_native { namespace metal {
dynamicOffsets = mCommands.NextData<uint64_t>(cmd->dynamicOffsetCount);
}
- ApplyBindGroup(cmd->index, ToBackend(cmd->group.Get()), cmd->dynamicOffsetCount,
- dynamicOffsets, ToBackend(lastPipeline->GetLayout()),
- &storageBufferLengths, nil, encoder);
+ bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+ cmd->dynamicOffsetCount, dynamicOffsets);
} break;
case Command::InsertDebugMarker: {
@@ -870,6 +905,7 @@ namespace dawn_native { namespace metal {
uint32_t indexBufferBaseOffset = 0;
VertexInputBufferTracker vertexInputBuffers;
StorageBufferLengthTracker storageBufferLengths = {};
+ BindGroupTracker bindGroups(&storageBufferLengths);
// This will be autoreleased
id<MTLRenderCommandEncoder> encoder =
@@ -881,7 +917,8 @@ namespace dawn_native { namespace metal {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
vertexInputBuffers.Apply(encoder, lastPipeline);
- storageBufferLengths.Apply(lastPipeline, encoder);
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
// The instance count must be non-zero, otherwise no-op
if (draw->instanceCount != 0) {
@@ -899,7 +936,8 @@ namespace dawn_native { namespace metal {
IndexFormatSize(lastPipeline->GetVertexInputDescriptor()->indexFormat);
vertexInputBuffers.Apply(encoder, lastPipeline);
- storageBufferLengths.Apply(lastPipeline, encoder);
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
// The index and instance count must be non-zero, otherwise no-op
if (draw->indexCount != 0 && draw->instanceCount != 0) {
@@ -919,7 +957,8 @@ namespace dawn_native { namespace metal {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
vertexInputBuffers.Apply(encoder, lastPipeline);
- storageBufferLengths.Apply(lastPipeline, encoder);
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
@@ -932,7 +971,8 @@ namespace dawn_native { namespace metal {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
vertexInputBuffers.Apply(encoder, lastPipeline);
- storageBufferLengths.Apply(lastPipeline, encoder);
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
@@ -973,6 +1013,8 @@ namespace dawn_native { namespace metal {
RenderPipeline* newPipeline = ToBackend(cmd->pipeline).Get();
vertexInputBuffers.OnSetPipeline(lastPipeline, newPipeline);
+ bindGroups.OnSetPipeline(newPipeline);
+
[encoder setDepthStencilState:newPipeline->GetMTLDepthStencilState()];
[encoder setFrontFacingWinding:newPipeline->GetMTLFrontFace()];
[encoder setCullMode:newPipeline->GetMTLCullMode()];
@@ -988,9 +1030,8 @@ namespace dawn_native { namespace metal {
dynamicOffsets = iter->NextData<uint64_t>(cmd->dynamicOffsetCount);
}
- ApplyBindGroup(cmd->index, ToBackend(cmd->group.Get()), cmd->dynamicOffsetCount,
- dynamicOffsets, ToBackend(lastPipeline->GetLayout()),
- &storageBufferLengths, encoder, nil);
+ bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+ cmd->dynamicOffsetCount, dynamicOffsets);
} break;
case Command::SetIndexBuffer: {
@@ -1000,13 +1041,11 @@ namespace dawn_native { namespace metal {
indexBufferBaseOffset = cmd->offset;
} break;
- case Command::SetVertexBuffers: {
- SetVertexBuffersCmd* cmd = iter->NextCommand<SetVertexBuffersCmd>();
- const Ref<BufferBase>* buffers = iter->NextData<Ref<BufferBase>>(cmd->count);
- const uint64_t* offsets = iter->NextData<uint64_t>(cmd->count);
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- vertexInputBuffers.OnSetVertexBuffers(cmd->startSlot, cmd->count, buffers,
- offsets);
+ vertexInputBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+ cmd->offset);
} break;
default:
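Editor's note: the rewritten Metal command buffer applies bind groups and vertex buffers lazily: SetBindGroup / SetVertexBuffer only record state and mark it dirty, and the trackers flush right before a draw or dispatch once the pipeline is known. A toy dirty-bit tracker illustrating the idea (much simplified; the real BindGroupTracker also handles dynamic offsets and inherited groups):

#include <bitset>
#include <cstdint>

constexpr uint32_t kMaxGroups = 4;

class DirtyGroupTracker {
  public:
    void OnSetGroup(uint32_t index, int groupId) {
        mGroups[index] = groupId;
        mDirty.set(index);  // record only; nothing is applied yet
    }

    void OnSetPipeline() {
        // A new pipeline may consume the groups differently, so reapply everything
        // (the real tracker is smarter about what can be inherited).
        mDirty.set();
    }

    // Called right before a draw/dispatch, when the pipeline is known.
    template <typename ApplyFn>
    void Apply(ApplyFn&& applyOne) {
        for (uint32_t i = 0; i < kMaxGroups; ++i) {
            if (mDirty.test(i)) {
                applyOne(i, mGroups[i]);
            }
        }
        mDirty.reset();
    }

  private:
    int mGroups[kMaxGroups] = {};
    std::bitset<kMaxGroups> mDirty;
};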
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
index 20ab9e96378..5d8c671e172 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
@@ -21,8 +21,9 @@
#include "dawn_native/Device.h"
#include "dawn_native/metal/Forward.h"
+#import <IOSurface/IOSurfaceRef.h>
#import <Metal/Metal.h>
-#import <QuartzCore/CAMetalLayer.h>
+#import <QuartzCore/QuartzCore.h>
#include <atomic>
#include <memory>
@@ -42,9 +43,10 @@ namespace dawn_native { namespace metal {
Serial GetCompletedCommandSerial() const final override;
Serial GetLastSubmittedCommandSerial() const final override;
- void TickImpl() override;
+ MaybeError TickImpl() override;
id<MTLDevice> GetMTLDevice();
+ id<MTLCommandQueue> GetMTLQueue();
id<MTLCommandBuffer> GetPendingCommandBuffer();
Serial GetPendingCommandSerial() const override;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
index dda06915bee..bacc529ab05 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
@@ -76,10 +76,15 @@ namespace dawn_native { namespace metal {
}
void Device::InitTogglesFromDriver() {
+#if defined(DAWN_PLATFORM_MACOS)
+ if (@available(macOS 10.12, *)) {
+ bool emulateStoreAndMSAAResolve =
+ ![mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
+ SetToggle(Toggle::EmulateStoreAndMSAAResolve, emulateStoreAndMSAAResolve);
+ }
+#endif
+
// TODO(jiawei.shao@intel.com): check iOS feature sets
- bool emulateStoreAndMSAAResolve =
- ![mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
- SetToggle(Toggle::EmulateStoreAndMSAAResolve, emulateStoreAndMSAAResolve);
// TODO(jiawei.shao@intel.com): tighten this workaround when the driver bug is fixed.
SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
@@ -148,10 +153,10 @@ namespace dawn_native { namespace metal {
return mLastSubmittedSerial + 1;
}
- void Device::TickImpl() {
+ MaybeError Device::TickImpl() {
Serial completedSerial = GetCompletedCommandSerial();
- mDynamicUploader->Tick(completedSerial);
+ mDynamicUploader->Deallocate(completedSerial);
mMapTracker->Tick(completedSerial);
if (mPendingCommands != nil) {
@@ -162,12 +167,18 @@ namespace dawn_native { namespace metal {
mCompletedSerial++;
mLastSubmittedSerial++;
}
+
+ return {};
}
id<MTLDevice> Device::GetMTLDevice() {
return mMtlDevice;
}
+ id<MTLCommandQueue> Device::GetMTLQueue() {
+ return mCommandQueue;
+ }
+
id<MTLCommandBuffer> Device::GetPendingCommandBuffer() {
TRACE_EVENT0(GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
"DeviceMTL::GetPendingCommandBuffer");
@@ -236,6 +247,7 @@ namespace dawn_native { namespace metal {
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
std::unique_ptr<StagingBufferBase> stagingBuffer =
std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
return std::move(stagingBuffer);
}
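Editor's note: TickImpl now calls DynamicUploader::Deallocate(completedSerial), the serial-gated cleanup used elsewhere in this patch: allocations are tagged with the serial of the commands that last use them (see the GetPendingCommandSerial argument to Allocate) and are released only once the device reports that serial as completed. A compact sketch of that pattern with stand-in types:

#include <cstdint>
#include <deque>
#include <functional>
#include <utility>

class DeferredDeleter {
  public:
    // Tag a release callback with the serial of the last GPU work that uses the resource.
    void Enqueue(uint64_t lastUseSerial, std::function<void()> release) {
        mPending.emplace_back(lastUseSerial, std::move(release));
    }

    // Called from Tick() with the last serial the GPU has finished executing.
    void Deallocate(uint64_t completedSerial) {
        while (!mPending.empty() && mPending.front().first <= completedSerial) {
            mPending.front().second();  // safe to free: the GPU is done with it
            mPending.pop_front();
        }
    }

  private:
    // Entries are enqueued in increasing serial order.
    std::deque<std::pair<uint64_t, std::function<void()>>> mPending;
};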
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
index 2469371d7e2..491a70abe72 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
@@ -24,8 +24,7 @@ namespace dawn_native { namespace metal {
: PipelineLayoutBase(device, descriptor) {
// Each stage has its own numbering namespace in CompilerMSL.
for (auto stage : IterateStages(kAllStages)) {
- // Buffer number 0 is reserved for push constants
- uint32_t bufferIndex = 1;
+ uint32_t bufferIndex = 0;
uint32_t samplerIndex = 0;
uint32_t textureIndex = 0;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h
index 79bafb2339f..d9869dec70f 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h
@@ -27,7 +27,7 @@ namespace dawn_native { namespace metal {
Queue(Device* device);
private:
- void SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
};
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
index 084c9ef05bb..d815c6e56ce 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
@@ -23,7 +23,7 @@ namespace dawn_native { namespace metal {
Queue::Queue(Device* device) : QueueBase(device) {
}
- void Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
Device* device = ToBackend(GetDevice());
device->Tick();
id<MTLCommandBuffer> commandBuffer = device->GetPendingCommandBuffer();
@@ -37,6 +37,7 @@ namespace dawn_native { namespace metal {
"CommandBufferMTL::FillCommands");
device->SubmitPendingCommandBuffer();
+ return {};
}
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
index c00d488cace..6683f1ca9a1 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
@@ -182,7 +182,12 @@ namespace dawn_native { namespace metal {
}
}
- MTLColorWriteMask MetalColorWriteMask(dawn::ColorWriteMask writeMask) {
+ MTLColorWriteMask MetalColorWriteMask(dawn::ColorWriteMask writeMask,
+ bool isDeclaredInFragmentShader) {
+ if (!isDeclaredInFragmentShader) {
+ return MTLColorWriteMaskNone;
+ }
+
MTLColorWriteMask mask = MTLColorWriteMaskNone;
if (writeMask & dawn::ColorWriteMask::Red) {
@@ -202,7 +207,8 @@ namespace dawn_native { namespace metal {
}
void ComputeBlendDesc(MTLRenderPipelineColorAttachmentDescriptor* attachment,
- const ColorStateDescriptor* descriptor) {
+ const ColorStateDescriptor* descriptor,
+ bool isDeclaredInFragmentShader) {
attachment.blendingEnabled = BlendEnabled(descriptor);
attachment.sourceRGBBlendFactor =
MetalBlendFactor(descriptor->colorBlend.srcFactor, false);
@@ -214,7 +220,8 @@ namespace dawn_native { namespace metal {
attachment.destinationAlphaBlendFactor =
MetalBlendFactor(descriptor->alphaBlend.dstFactor, true);
attachment.alphaBlendOperation = MetalBlendOperation(descriptor->alphaBlend.operation);
- attachment.writeMask = MetalColorWriteMask(descriptor->writeMask);
+ attachment.writeMask =
+ MetalColorWriteMask(descriptor->writeMask, isDeclaredInFragmentShader);
}
MTLStencilOperation MetalStencilOperation(dawn::StencilOperation stencilOperation) {
@@ -339,11 +346,15 @@ namespace dawn_native { namespace metal {
descriptorMTL.stencilAttachmentPixelFormat = MetalPixelFormat(depthStencilFormat);
}
+ const ShaderModuleBase::FragmentOutputBaseTypes& fragmentOutputBaseTypes =
+ descriptor->fragmentStage->module->GetFragmentOutputBaseTypes();
for (uint32_t i : IterateBitSet(GetColorAttachmentsMask())) {
descriptorMTL.colorAttachments[i].pixelFormat =
MetalPixelFormat(GetColorAttachmentFormat(i));
const ColorStateDescriptor* descriptor = GetColorStateDescriptor(i);
- ComputeBlendDesc(descriptorMTL.colorAttachments[i], descriptor);
+ bool isDeclaredInFragmentShader = fragmentOutputBaseTypes[i] != Format::Other;
+ ComputeBlendDesc(descriptorMTL.colorAttachments[i], descriptor,
+ isDeclaredInFragmentShader);
}
descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
@@ -354,8 +365,6 @@ namespace dawn_native { namespace metal {
descriptorMTL.sampleCount = GetSampleCount();
- // TODO(kainino@chromium.org): push constants, textures, samplers
-
{
NSError* error = nil;
mMtlRenderPipelineState = [mtlDevice newRenderPipelineStateWithDescriptor:descriptorMTL
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
index 69c96333bd2..d27c5aab14b 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
@@ -54,10 +54,6 @@ namespace dawn_native { namespace metal {
// If these options are changed, the values in DawnSPIRVCrossMSLFastFuzzer.cpp need to be
// updated.
- spirv_cross::CompilerGLSL::Options options_glsl;
- options_glsl.vertex.flip_vert_y = true;
- compiler.spirv_cross::CompilerGLSL::set_common_options(options_glsl);
-
spirv_cross::CompilerMSL::Options options_msl;
// Disable PointSize builtin for https://bugs.chromium.org/p/dawn/issues/detail?id=146
@@ -77,17 +73,6 @@ namespace dawn_native { namespace metal {
// To make the MSL indices match the indices chosen in the PipelineLayout, we build
// a table of MSLResourceBinding to give to SPIRV-Cross.
- // Reserve index 0 for buffers for the push constants buffer.
- for (auto stage : IterateStages(kAllStages)) {
- spirv_cross::MSLResourceBinding binding;
- binding.stage = SpirvExecutionModelForStage(stage);
- binding.desc_set = spirv_cross::kPushConstDescSet;
- binding.binding = spirv_cross::kPushConstBinding;
- binding.msl_buffer = 0;
-
- compiler.add_msl_resource_binding(binding);
- }
-
// Create one resource binding entry per stage per binding.
for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
const auto& bgInfo = layout->GetBindGroupLayout(group)->GetBindingInfo();
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
index 063add6d7a6..5141ea77eeb 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
@@ -28,7 +28,7 @@ namespace dawn_native { namespace metal {
protected:
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- void OnBeforePresent(TextureBase* texture) override;
+ MaybeError OnBeforePresent(TextureBase* texture) override;
};
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
index 0677ca0a253..92458a209fd 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
@@ -26,6 +26,7 @@ namespace dawn_native { namespace metal {
const auto& im = GetImplementation();
DawnWSIContextMetal wsiContext = {};
wsiContext.device = ToBackend(GetDevice())->GetMTLDevice();
+ wsiContext.queue = ToBackend(GetDevice())->GetMTLQueue();
im.Init(im.userData, &wsiContext);
}
@@ -45,7 +46,8 @@ namespace dawn_native { namespace metal {
return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture);
}
- void SwapChain::OnBeforePresent(TextureBase*) {
+ MaybeError SwapChain::OnBeforePresent(TextureBase*) {
+ return {};
}
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
index d4feb5bfc47..13c30f87613 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
@@ -17,6 +17,7 @@
#include "dawn_native/Texture.h"
+#include <IOSurface/IOSurfaceRef.h>
#import <Metal/Metal.h>
namespace dawn_native { namespace metal {
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
index 8bbd28be821..12bbc73fe7e 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
@@ -14,10 +14,9 @@
#include "dawn_native/metal/TextureMTL.h"
+#include "common/Platform.h"
#include "dawn_native/metal/DeviceMTL.h"
-#include <IOSurface/IOSurface.h>
-
namespace dawn_native { namespace metal {
namespace {
@@ -121,6 +120,14 @@ namespace dawn_native { namespace metal {
return DAWN_VALIDATION_ERROR("Unsupported IOSurface format");
}
}
+
+#if defined(DAWN_PLATFORM_MACOS)
+ MTLStorageMode kIOSurfaceStorageMode = MTLStorageModeManaged;
+#elif defined(DAWN_PLATFORM_IOS)
+ MTLStorageMode kIOSurfaceStorageMode = MTLStorageModePrivate;
+#else
+# error "Unsupported Apple platform."
+#endif
}
MTLPixelFormat MetalPixelFormat(dawn::TextureFormat format) {
@@ -207,6 +214,7 @@ namespace dawn_native { namespace metal {
case dawn::TextureFormat::Depth24PlusStencil8:
return MTLPixelFormatDepth32Float_Stencil8;
+#if defined(DAWN_PLATFORM_MACOS)
case dawn::TextureFormat::BC1RGBAUnorm:
return MTLPixelFormatBC1_RGBA;
case dawn::TextureFormat::BC1RGBAUnormSrgb:
@@ -235,6 +243,7 @@ namespace dawn_native { namespace metal {
return MTLPixelFormatBC7_RGBAUnorm;
case dawn::TextureFormat::BC7RGBAUnormSrgb:
return MTLPixelFormatBC7_RGBAUnorm_sRGB;
+#endif
default:
UNREACHABLE();
@@ -322,7 +331,7 @@ namespace dawn_native { namespace metal {
uint32_t plane)
: TextureBase(device, descriptor, TextureState::OwnedInternal) {
MTLTextureDescriptor* mtlDesc = CreateMetalTextureDescriptor(descriptor);
- mtlDesc.storageMode = MTLStorageModeManaged;
+ mtlDesc.storageMode = kIOSurfaceStorageMode;
mMtlTexture = [device->GetMTLDevice() newTextureWithDescriptor:mtlDesc
iosurface:ioSurface
plane:plane];
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
index 44e62db51de..08e39b2e60c 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
@@ -150,6 +150,7 @@ namespace dawn_native { namespace null {
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
std::unique_ptr<StagingBufferBase> stagingBuffer =
std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
return std::move(stagingBuffer);
}
@@ -196,8 +197,9 @@ namespace dawn_native { namespace null {
return mLastSubmittedSerial + 1;
}
- void Device::TickImpl() {
+ MaybeError Device::TickImpl() {
SubmitPendingOperations();
+ return {};
}
void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
@@ -317,8 +319,9 @@ namespace dawn_native { namespace null {
Queue::~Queue() {
}
- void Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
+ MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
ToBackend(GetDevice())->SubmitPendingOperations();
+ return {};
}
// SwapChain
@@ -336,7 +339,8 @@ namespace dawn_native { namespace null {
return GetDevice()->CreateTexture(descriptor);
}
- void SwapChain::OnBeforePresent(TextureBase*) {
+ MaybeError SwapChain::OnBeforePresent(TextureBase*) {
+ return {};
}
// NativeSwapChainImpl
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
index 640ff588c75..ef98719223e 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
@@ -26,7 +26,7 @@
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/Queue.h"
#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/RingBuffer.h"
+#include "dawn_native/RingBufferAllocator.h"
#include "dawn_native/Sampler.h"
#include "dawn_native/ShaderModule.h"
#include "dawn_native/StagingBuffer.h"
@@ -92,7 +92,7 @@ namespace dawn_native { namespace null {
Serial GetCompletedCommandSerial() const final override;
Serial GetLastSubmittedCommandSerial() const final override;
Serial GetPendingCommandSerial() const override;
- void TickImpl() override;
+ MaybeError TickImpl() override;
void AddPendingOperation(std::unique_ptr<PendingOperation> operation);
void SubmitPendingOperations();
@@ -191,7 +191,7 @@ namespace dawn_native { namespace null {
~Queue();
private:
- void SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
};
class SwapChain : public SwapChainBase {
@@ -201,7 +201,7 @@ namespace dawn_native { namespace null {
protected:
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- void OnBeforePresent(TextureBase*) override;
+ MaybeError OnBeforePresent(TextureBase*) override;
};
class NativeSwapChainImpl {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
index 8351509d0fa..9e9dae2e637 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/opengl/CommandBufferGL.h"
#include "dawn_native/BindGroup.h"
+#include "dawn_native/BindGroupTracker.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
#include "dawn_native/RenderBundle.h"
@@ -141,19 +142,13 @@ namespace dawn_native { namespace opengl {
mIndexBuffer = ToBackend(buffer);
}
- void OnSetVertexBuffers(uint32_t startSlot,
- uint32_t count,
- Ref<BufferBase>* buffers,
- uint64_t* offsets) {
- for (uint32_t i = 0; i < count; ++i) {
- uint32_t slot = startSlot + i;
- mVertexBuffers[slot] = ToBackend(buffers[i].Get());
- mVertexBufferOffsets[slot] = offsets[i];
- }
+ void OnSetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset) {
+ mVertexBuffers[slot] = ToBackend(buffer);
+ mVertexBufferOffsets[slot] = offset;
// Use 64 bit masks and make sure there are no shift UB
static_assert(kMaxVertexBuffers <= 8 * sizeof(unsigned long long) - 1, "");
- mDirtyVertexBuffers |= ((1ull << count) - 1ull) << startSlot;
+ mDirtyVertexBuffers |= 1ull << slot;
}
void OnSetPipeline(RenderPipelineBase* pipeline) {
@@ -215,88 +210,109 @@ namespace dawn_native { namespace opengl {
RenderPipelineBase* mLastPipeline = nullptr;
};
- // Handles SetBindGroup commands with the specifics of translating to OpenGL texture and
- // buffer units
- void ApplyBindGroup(const OpenGLFunctions& gl,
- uint32_t index,
- BindGroupBase* group,
- PipelineLayout* pipelineLayout,
- PipelineGL* pipeline,
- uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets) {
- const auto& indices = pipelineLayout->GetBindingIndexInfo()[index];
- const auto& layout = group->GetLayout()->GetBindingInfo();
- uint32_t currentDynamicIndex = 0;
-
- for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
- switch (layout.types[bindingIndex]) {
- case dawn::BindingType::UniformBuffer: {
- BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
- GLuint buffer = ToBackend(binding.buffer)->GetHandle();
- GLuint uboIndex = indices[bindingIndex];
- GLuint offset = binding.offset;
-
- if (layout.dynamic[bindingIndex]) {
- offset += dynamicOffsets[currentDynamicIndex];
- ++currentDynamicIndex;
- }
+ class BindGroupTracker : public BindGroupTrackerBase<BindGroupBase*, false> {
+ public:
+ void OnSetPipeline(RenderPipeline* pipeline) {
+ BindGroupTrackerBase::OnSetPipeline(pipeline);
+ mPipeline = pipeline;
+ }
- gl.BindBufferRange(GL_UNIFORM_BUFFER, uboIndex, buffer, offset,
- binding.size);
- } break;
+ void OnSetPipeline(ComputePipeline* pipeline) {
+ BindGroupTrackerBase::OnSetPipeline(pipeline);
+ mPipeline = pipeline;
+ }
- case dawn::BindingType::Sampler: {
- Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
- GLuint samplerIndex = indices[bindingIndex];
+ void Apply(const OpenGLFunctions& gl) {
+ for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ ApplyBindGroup(gl, index, mBindGroups[index], mDynamicOffsetCounts[index],
+ mDynamicOffsets[index].data());
+ }
+ DidApply();
+ }
- for (PipelineGL::SamplerUnit unit :
- pipeline->GetTextureUnitsForSampler(samplerIndex)) {
- // Only use filtering for certain texture units, because int and uint
-                        // textures are only complete without filtering
- if (unit.shouldUseFiltering) {
- gl.BindSampler(unit.unit, sampler->GetFilteringHandle());
- } else {
- gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
+ private:
+ void ApplyBindGroup(const OpenGLFunctions& gl,
+ uint32_t index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets) {
+ const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
+ const auto& layout = group->GetLayout()->GetBindingInfo();
+ uint32_t currentDynamicIndex = 0;
+
+ for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
+ switch (layout.types[bindingIndex]) {
+ case dawn::BindingType::UniformBuffer: {
+ BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+ GLuint buffer = ToBackend(binding.buffer)->GetHandle();
+ GLuint uboIndex = indices[bindingIndex];
+ GLuint offset = binding.offset;
+
+ if (layout.hasDynamicOffset[bindingIndex]) {
+ offset += dynamicOffsets[currentDynamicIndex];
+ ++currentDynamicIndex;
}
- }
- } break;
- case dawn::BindingType::SampledTexture: {
- TextureView* view = ToBackend(group->GetBindingAsTextureView(bindingIndex));
- GLuint handle = view->GetHandle();
- GLenum target = view->GetGLTarget();
- GLuint viewIndex = indices[bindingIndex];
+ gl.BindBufferRange(GL_UNIFORM_BUFFER, uboIndex, buffer, offset,
+ binding.size);
+ } break;
- for (auto unit : pipeline->GetTextureUnitsForTextureView(viewIndex)) {
- gl.ActiveTexture(GL_TEXTURE0 + unit);
- gl.BindTexture(target, handle);
- }
- } break;
+ case dawn::BindingType::Sampler: {
+ Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+ GLuint samplerIndex = indices[bindingIndex];
- case dawn::BindingType::StorageBuffer: {
- BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
- GLuint buffer = ToBackend(binding.buffer)->GetHandle();
- GLuint ssboIndex = indices[bindingIndex];
- GLuint offset = binding.offset;
+ for (PipelineGL::SamplerUnit unit :
+ mPipeline->GetTextureUnitsForSampler(samplerIndex)) {
+ // Only use filtering for certain texture units, because int and
+                            // uint textures are only complete without filtering
+ if (unit.shouldUseFiltering) {
+ gl.BindSampler(unit.unit, sampler->GetFilteringHandle());
+ } else {
+ gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
+ }
+ }
+ } break;
+
+ case dawn::BindingType::SampledTexture: {
+ TextureView* view =
+ ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ GLuint handle = view->GetHandle();
+ GLenum target = view->GetGLTarget();
+ GLuint viewIndex = indices[bindingIndex];
+
+ for (auto unit : mPipeline->GetTextureUnitsForTextureView(viewIndex)) {
+ gl.ActiveTexture(GL_TEXTURE0 + unit);
+ gl.BindTexture(target, handle);
+ }
+ } break;
- if (layout.dynamic[bindingIndex]) {
- offset += dynamicOffsets[currentDynamicIndex];
- ++currentDynamicIndex;
- }
+ case dawn::BindingType::StorageBuffer: {
+ BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+ GLuint buffer = ToBackend(binding.buffer)->GetHandle();
+ GLuint ssboIndex = indices[bindingIndex];
+ GLuint offset = binding.offset;
- gl.BindBufferRange(GL_SHADER_STORAGE_BUFFER, ssboIndex, buffer, offset,
- binding.size);
- } break;
+ if (layout.hasDynamicOffset[bindingIndex]) {
+ offset += dynamicOffsets[currentDynamicIndex];
+ ++currentDynamicIndex;
+ }
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
- UNREACHABLE();
- break;
+ gl.BindBufferRange(GL_SHADER_STORAGE_BUFFER, ssboIndex, buffer, offset,
+ binding.size);
+ } break;
+
+ case dawn::BindingType::StorageTexture:
+ case dawn::BindingType::ReadonlyStorageBuffer:
+ UNREACHABLE();
+ break;
- // TODO(shaobo.yan@intel.com): Implement dynamic buffer offset.
+ // TODO(shaobo.yan@intel.com): Implement dynamic buffer offset.
+ }
}
}
- }
+
+ PipelineGL* mPipeline = nullptr;
+ };
void ResolveMultisampledRenderTargets(const OpenGLFunctions& gl,
const BeginRenderPassCmd* renderPass) {
@@ -391,12 +407,10 @@ namespace dawn_native { namespace opengl {
auto TransitionForPass = [](const PassResourceUsage& usages) {
for (size_t i = 0; i < usages.textures.size(); i++) {
Texture* texture = ToBackend(usages.textures[i]);
- // We count the lazy clears for non output attachment textures and depth stencil
- // textures in order to match the backdoor lazy clear counts in Vulkan and D3D12.
+ // We count the lazy clears for non output attachment textures in order to match the
+ // backdoor lazy clear counts in Vulkan and D3D12.
bool isLazyClear =
- ((!(usages.textureUsages[i] & dawn::TextureUsage::OutputAttachment) &&
- texture->GetFormat().IsColor()) ||
- texture->GetFormat().HasDepthOrStencil());
+ !(usages.textureUsages[i] & dawn::TextureUsage::OutputAttachment);
texture->EnsureSubresourceContentInitialized(
0, texture->GetNumMipLevels(), 0, texture->GetArrayLayers(), isLazyClear);
}
@@ -447,8 +461,8 @@ namespace dawn_native { namespace opengl {
GLenum target = texture->GetGLTarget();
const GLFormat& format = texture->GetGLFormat();
if (IsCompleteSubresourceCopiedTo(texture, copySize, dst.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(dst.mipLevel, 1, dst.arrayLayer,
- 1);
+ texture->SetIsSubresourceContentInitialized(true, dst.mipLevel, 1,
+ dst.arrayLayer, 1);
} else {
texture->EnsureSubresourceContentInitialized(dst.mipLevel, 1,
dst.arrayLayer, 1);
@@ -459,22 +473,20 @@ namespace dawn_native { namespace opengl {
gl.BindTexture(target, texture->GetHandle());
const Format& formatInfo = texture->GetFormat();
- gl.PixelStorei(
- GL_UNPACK_ROW_LENGTH,
- src.rowPitch / texture->GetFormat().blockByteSize * formatInfo.blockWidth);
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+ src.rowPitch / formatInfo.blockByteSize * formatInfo.blockWidth);
gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, src.imageHeight);
- if (texture->GetFormat().isCompressed) {
+ if (formatInfo.isCompressed) {
gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, formatInfo.blockByteSize);
gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, formatInfo.blockWidth);
gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, formatInfo.blockHeight);
gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
ASSERT(texture->GetDimension() == dawn::TextureDimension::e2D);
- uint64_t copyDataSize =
- (copySize.width / texture->GetFormat().blockWidth) *
- (copySize.height / texture->GetFormat().blockHeight) *
- texture->GetFormat().blockByteSize;
+ uint64_t copyDataSize = (copySize.width / formatInfo.blockWidth) *
+ (copySize.height / formatInfo.blockHeight) *
+ formatInfo.blockByteSize;
Extent3D copyExtent = ComputeTextureCopyExtent(dst, copySize);
if (texture->GetArrayLayers() > 1) {
@@ -591,7 +603,7 @@ namespace dawn_native { namespace opengl {
srcTexture->EnsureSubresourceContentInitialized(src.mipLevel, 1, src.arrayLayer,
1);
if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
- dstTexture->SetIsSubresourceContentInitialized(dst.mipLevel, 1,
+ dstTexture->SetIsSubresourceContentInitialized(true, dst.mipLevel, 1,
dst.arrayLayer, 1);
} else {
dstTexture->EnsureSubresourceContentInitialized(dst.mipLevel, 1,
@@ -612,6 +624,7 @@ namespace dawn_native { namespace opengl {
void CommandBuffer::ExecuteComputePass() {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
ComputePipeline* lastPipeline = nullptr;
+ BindGroupTracker bindGroupTracker = {};
Command type;
while (mCommands.NextCommandId(&type)) {
@@ -623,6 +636,8 @@ namespace dawn_native { namespace opengl {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+ bindGroupTracker.Apply(gl);
+
gl.DispatchCompute(dispatch->x, dispatch->y, dispatch->z);
// TODO(cwallez@chromium.org): add barriers to the API
gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
@@ -630,6 +645,7 @@ namespace dawn_native { namespace opengl {
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+ bindGroupTracker.Apply(gl);
uint64_t indirectBufferOffset = dispatch->indirectOffset;
Buffer* indirectBuffer = ToBackend(dispatch->indirectBuffer.Get());
@@ -644,6 +660,8 @@ namespace dawn_native { namespace opengl {
SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
lastPipeline = ToBackend(cmd->pipeline).Get();
lastPipeline->ApplyNow();
+
+ bindGroupTracker.OnSetPipeline(lastPipeline);
} break;
case Command::SetBindGroup: {
@@ -652,9 +670,8 @@ namespace dawn_native { namespace opengl {
if (cmd->dynamicOffsetCount > 0) {
dynamicOffsets = mCommands.NextData<uint64_t>(cmd->dynamicOffsetCount);
}
- ApplyBindGroup(gl, cmd->index, cmd->group.Get(),
- ToBackend(lastPipeline->GetLayout()), lastPipeline,
- cmd->dynamicOffsetCount, dynamicOffsets);
+ bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+ cmd->dynamicOffsetCount, dynamicOffsets);
} break;
case Command::InsertDebugMarker:
@@ -760,28 +777,49 @@ namespace dawn_native { namespace opengl {
{
for (uint32_t i :
IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- const auto& attachmentInfo = renderPass->colorAttachments[i];
+ auto* attachmentInfo = &renderPass->colorAttachments[i];
+ TextureView* view = ToBackend(attachmentInfo->view.Get());
// Load op - color
// TODO(cwallez@chromium.org): Choose the clear function depending on the
// componentType: things work for now because the clear color is always a float, but
            // when that's fixed, we will lose precision on integer formats when converting to
// float.
- if (attachmentInfo.loadOp == dawn::LoadOp::Clear) {
+ if (attachmentInfo->loadOp == dawn::LoadOp::Clear) {
gl.ColorMaski(i, true, true, true, true);
- gl.ClearBufferfv(GL_COLOR, i, &attachmentInfo.clearColor.r);
+ gl.ClearBufferfv(GL_COLOR, i, &attachmentInfo->clearColor.r);
+ }
+
+ switch (attachmentInfo->storeOp) {
+ case dawn::StoreOp::Store: {
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ true, view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount());
+ } break;
+
+ case dawn::StoreOp::Clear: {
+ // TODO(natlee@microsoft.com): call glDiscard to do optimization
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ false, view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount());
+ } break;
+
+ default:
+ UNREACHABLE();
+ break;
}
}
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- const auto& attachmentInfo = renderPass->depthStencilAttachment;
- const Format& attachmentFormat = attachmentInfo.view->GetTexture()->GetFormat();
+ auto* attachmentInfo = &renderPass->depthStencilAttachment;
+ const Format& attachmentFormat = attachmentInfo->view->GetTexture()->GetFormat();
+ TextureView* view = ToBackend(attachmentInfo->view.Get());
// Load op - depth/stencil
bool doDepthClear = attachmentFormat.HasDepth() &&
- (attachmentInfo.depthLoadOp == dawn::LoadOp::Clear);
+ (attachmentInfo->depthLoadOp == dawn::LoadOp::Clear);
bool doStencilClear = attachmentFormat.HasStencil() &&
- (attachmentInfo.stencilLoadOp == dawn::LoadOp::Clear);
+ (attachmentInfo->stencilLoadOp == dawn::LoadOp::Clear);
if (doDepthClear) {
gl.DepthMask(GL_TRUE);
@@ -791,14 +829,26 @@ namespace dawn_native { namespace opengl {
}
if (doDepthClear && doStencilClear) {
- gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, attachmentInfo.clearDepth,
- attachmentInfo.clearStencil);
+ gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, attachmentInfo->clearDepth,
+ attachmentInfo->clearStencil);
} else if (doDepthClear) {
- gl.ClearBufferfv(GL_DEPTH, 0, &attachmentInfo.clearDepth);
+ gl.ClearBufferfv(GL_DEPTH, 0, &attachmentInfo->clearDepth);
} else if (doStencilClear) {
- const GLint clearStencil = attachmentInfo.clearStencil;
+ const GLint clearStencil = attachmentInfo->clearStencil;
gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
}
+
+ if (attachmentInfo->depthStoreOp == dawn::StoreOp::Store &&
+ attachmentInfo->stencilStoreOp == dawn::StoreOp::Store) {
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ true, view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount());
+ } else if (attachmentInfo->depthStoreOp == dawn::StoreOp::Clear &&
+ attachmentInfo->stencilStoreOp == dawn::StoreOp::Clear) {
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ false, view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount());
+ }
}
}
@@ -806,12 +856,14 @@ namespace dawn_native { namespace opengl {
uint64_t indexBufferBaseOffset = 0;
InputBufferTracker inputBuffers;
+ BindGroupTracker bindGroupTracker = {};
auto DoRenderBundleCommand = [&](CommandIterator* iter, Command type) {
switch (type) {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
inputBuffers.Apply(gl);
+ bindGroupTracker.Apply(gl);
if (draw->firstInstance > 0) {
gl.DrawArraysInstancedBaseInstance(
@@ -828,6 +880,7 @@ namespace dawn_native { namespace opengl {
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
inputBuffers.Apply(gl);
+ bindGroupTracker.Apply(gl);
dawn::IndexFormat indexFormat =
lastPipeline->GetVertexInputDescriptor()->indexFormat;
@@ -853,6 +906,7 @@ namespace dawn_native { namespace opengl {
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
inputBuffers.Apply(gl);
+ bindGroupTracker.Apply(gl);
uint64_t indirectBufferOffset = draw->indirectOffset;
Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
@@ -866,6 +920,7 @@ namespace dawn_native { namespace opengl {
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
inputBuffers.Apply(gl);
+ bindGroupTracker.Apply(gl);
dawn::IndexFormat indexFormat =
lastPipeline->GetVertexInputDescriptor()->indexFormat;
@@ -894,6 +949,7 @@ namespace dawn_native { namespace opengl {
lastPipeline->ApplyNow(persistentPipelineState);
inputBuffers.OnSetPipeline(lastPipeline);
+ bindGroupTracker.OnSetPipeline(lastPipeline);
} break;
case Command::SetBindGroup: {
@@ -902,9 +958,8 @@ namespace dawn_native { namespace opengl {
if (cmd->dynamicOffsetCount > 0) {
dynamicOffsets = iter->NextData<uint64_t>(cmd->dynamicOffsetCount);
}
- ApplyBindGroup(gl, cmd->index, cmd->group.Get(),
- ToBackend(lastPipeline->GetLayout()), lastPipeline,
- cmd->dynamicOffsetCount, dynamicOffsets);
+ bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+ cmd->dynamicOffsetCount, dynamicOffsets);
} break;
case Command::SetIndexBuffer: {
@@ -913,11 +968,9 @@ namespace dawn_native { namespace opengl {
inputBuffers.OnSetIndexBuffer(cmd->buffer.Get());
} break;
- case Command::SetVertexBuffers: {
- SetVertexBuffersCmd* cmd = iter->NextCommand<SetVertexBuffersCmd>();
- auto buffers = iter->NextData<Ref<BufferBase>>(cmd->count);
- auto offsets = iter->NextData<uint64_t>(cmd->count);
- inputBuffers.OnSetVertexBuffers(cmd->startSlot, cmd->count, buffers, offsets);
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+ inputBuffers.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(), cmd->offset);
} break;
default:
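The OpenGL command buffer above replaces the free ApplyBindGroup() calls with a BindGroupTracker that records SetBindGroup commands as dirty state and flushes them lazily right before each draw or dispatch. A rough sketch of that deferred-apply idea, with invented names and a fixed group count (not the BindGroupTrackerBase API):

    // Hedged sketch of dirty-bit based, lazy bind group application.
    #include <array>
    #include <bitset>
    #include <cstddef>
    #include <iostream>

    struct BindGroup { int id = -1; };

    class SimpleBindGroupTracker {
      public:
        void OnSetBindGroup(size_t index, BindGroup group) {
            mGroups[index] = group;
            mDirty.set(index);          // remember it, do not bind yet
        }
        void Apply() {                  // called right before draw/dispatch
            for (size_t i = 0; i < mGroups.size(); ++i) {
                if (mDirty.test(i)) {
                    std::cout << "binding group " << mGroups[i].id
                              << " at index " << i << "\n";
                }
            }
            mDirty.reset();             // nothing left to flush
        }
      private:
        std::array<BindGroup, 4> mGroups{};
        std::bitset<4> mDirty;
    };

    int main() {
        SimpleBindGroupTracker tracker;
        tracker.OnSetBindGroup(0, {7});
        tracker.OnSetBindGroup(0, {9});  // overwrites; only one bind happens
        tracker.Apply();                 // binds group 9 at index 0
        tracker.Apply();                 // no-op, nothing is dirty
        return 0;
    }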
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
index 02837221ff2..a306e743b57 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
@@ -132,8 +132,9 @@ namespace dawn_native { namespace opengl {
return mLastSubmittedSerial + 1;
}
- void Device::TickImpl() {
+ MaybeError Device::TickImpl() {
CheckPassedFences();
+ return {};
}
void Device::CheckPassedFences() {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
index bfbdc272e53..5bafeddfccc 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
@@ -53,7 +53,7 @@ namespace dawn_native { namespace opengl {
Serial GetCompletedCommandSerial() const final override;
Serial GetLastSubmittedCommandSerial() const final override;
Serial GetPendingCommandSerial() const override;
- void TickImpl() override;
+ MaybeError TickImpl() override;
ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
index 241854a6fb3..fde06c853a0 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
@@ -22,7 +22,7 @@ namespace dawn_native { namespace opengl {
Queue::Queue(Device* device) : QueueBase(device) {
}
- void Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
Device* device = ToBackend(GetDevice());
for (uint32_t i = 0; i < commandCount; ++i) {
@@ -30,6 +30,7 @@ namespace dawn_native { namespace opengl {
}
device->SubmitFenceSync();
+ return {};
}
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
index 687d7a4491f..c18486ce2ad 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
@@ -27,7 +27,7 @@ namespace dawn_native { namespace opengl {
Queue(Device* device);
private:
- void SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
};
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
index e7e2d33e537..3059c582316 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
@@ -58,6 +58,7 @@ namespace dawn_native { namespace opengl {
// in D3D12, Metal and Vulkan, so we should normalize it in shaders in all backends.
// See the documentation of spirv_cross::CompilerGLSL::Options::vertex::fixup_clipspace for
// more details.
+ options.vertex.flip_vert_y = true;
options.vertex.fixup_clipspace = true;
// TODO(cwallez@chromium.org): discover the backing context version and use that.
@@ -68,28 +69,6 @@ namespace dawn_native { namespace opengl {
#endif
compiler.set_common_options(options);
- // Rename the push constant block to be prefixed with the shader stage type so that uniform
- // names don't match between the FS and the VS.
- const auto& resources = compiler.get_shader_resources();
- if (resources.push_constant_buffers.size() > 0) {
- const char* prefix = nullptr;
- switch (compiler.get_execution_model()) {
- case spv::ExecutionModelVertex:
- prefix = "vs_";
- break;
- case spv::ExecutionModelFragment:
- prefix = "fs_";
- break;
- case spv::ExecutionModelGLCompute:
- prefix = "cs_";
- break;
- default:
- UNREACHABLE();
- }
- auto interfaceBlock = resources.push_constant_buffers[0];
- compiler.set_name(interfaceBlock.id, prefix + interfaceBlock.name);
- }
-
ExtractSpirvInfo(compiler);
const auto& bindingInfo = GetBindingInfo();
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
index e988bc4dea8..bbd707464ed 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
@@ -44,7 +44,8 @@ namespace dawn_native { namespace opengl {
TextureBase::TextureState::OwnedExternal);
}
- void SwapChain::OnBeforePresent(TextureBase*) {
+ MaybeError SwapChain::OnBeforePresent(TextureBase*) {
+ return {};
}
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
index 9b7651473ae..d4df7d3a091 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
@@ -30,7 +30,7 @@ namespace dawn_native { namespace opengl {
protected:
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- void OnBeforePresent(TextureBase* texture) override;
+ MaybeError OnBeforePresent(TextureBase* texture) override;
};
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
index 94bd58f0286..4b4fdde1bcf 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
@@ -15,6 +15,9 @@
#include "dawn_native/opengl/TextureGL.h"
#include "common/Assert.h"
+#include "common/Constants.h"
+#include "common/Math.h"
+#include "dawn_native/opengl/BufferGL.h"
#include "dawn_native/opengl/DeviceGL.h"
#include "dawn_native/opengl/UtilsGL.h"
@@ -142,14 +145,8 @@ namespace dawn_native { namespace opengl {
gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- static constexpr uint32_t MAX_TEXEL_SIZE = 16;
- ASSERT(GetFormat().blockByteSize <= MAX_TEXEL_SIZE);
- GLubyte clearColor[MAX_TEXEL_SIZE];
- std::fill(clearColor, clearColor + MAX_TEXEL_SIZE, 255);
- // TODO(natlee@microsoft.com): clear all subresources
- for (uint32_t i = 0; i < GetNumMipLevels(); i++) {
- gl.ClearTexImage(mHandle, i, glFormat.format, glFormat.type, clearColor);
- }
+ GetDevice()->ConsumedError(ClearTexture(0, GetNumMipLevels(), 0, GetArrayLayers(),
+ TextureBase::ClearValue::NonZero));
}
}
@@ -182,50 +179,123 @@ namespace dawn_native { namespace opengl {
return ToBackend(GetDevice())->GetGLFormat(GetFormat());
}
- void Texture::ClearTexture(GLint baseMipLevel,
- GLint levelCount,
- GLint baseArrayLayer,
- uint32_t layerCount) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ MaybeError Texture::ClearTexture(GLint baseMipLevel,
+ GLint levelCount,
+ GLint baseArrayLayer,
+ uint32_t layerCount,
+ TextureBase::ClearValue clearValue) {
// TODO(jiawei.shao@intel.com): initialize the textures with compressed formats.
if (GetFormat().isCompressed) {
- return;
+ return {};
}
- if (GetFormat().HasDepthOrStencil()) {
- bool doDepthClear = GetFormat().HasDepth();
- bool doStencilClear = GetFormat().HasStencil();
- GLfloat depth = 0.0f;
- GLint stencil = 0u;
- if (doDepthClear) {
- gl.DepthMask(GL_TRUE);
- }
- if (doStencilClear) {
- gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
- }
+ Device* device = ToBackend(GetDevice());
+ const OpenGLFunctions& gl = device->gl;
+ uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+ if (GetFormat().isRenderable) {
+ if (GetFormat().HasDepthOrStencil()) {
+ bool doDepthClear = GetFormat().HasDepth();
+ bool doStencilClear = GetFormat().HasStencil();
+ GLfloat depth = clearColor;
+ GLint stencil = clearColor;
+ if (doDepthClear) {
+ gl.DepthMask(GL_TRUE);
+ }
+ if (doStencilClear) {
+ gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
+ }
- GLuint framebuffer = 0;
- gl.GenFramebuffers(1, &framebuffer);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GetGLTarget(),
- GetHandle(), 0);
- if (doDepthClear && doStencilClear) {
- gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
- } else if (doDepthClear) {
- gl.ClearBufferfv(GL_DEPTH, 0, &depth);
- } else if (doStencilClear) {
- gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
+ GLuint framebuffer = 0;
+ gl.GenFramebuffers(1, &framebuffer);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+ // TODO(natlee@microsoft.com): clear all mip levels and array layers.
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
+ GetGLTarget(), GetHandle(), 0);
+ if (doDepthClear && doStencilClear) {
+ gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
+ } else if (doDepthClear) {
+ gl.ClearBufferfv(GL_DEPTH, 0, &depth);
+ } else if (doStencilClear) {
+ gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
+ }
+ gl.DeleteFramebuffers(1, &framebuffer);
+ } else {
+ static constexpr uint32_t MAX_TEXEL_SIZE = 16;
+ ASSERT(GetFormat().blockByteSize <= MAX_TEXEL_SIZE);
+ std::array<GLbyte, MAX_TEXEL_SIZE> clearColorData;
+ clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 255;
+ clearColorData.fill(clearColor);
+
+ const GLFormat& glFormat = GetGLFormat();
+ for (GLint level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ Extent3D mipSize = GetMipLevelPhysicalSize(level);
+ gl.ClearTexSubImage(mHandle, level, 0, 0, baseArrayLayer, mipSize.width,
+ mipSize.height, layerCount, glFormat.format, glFormat.type,
+ clearColorData.data());
+ }
}
- gl.DeleteFramebuffers(1, &framebuffer);
} else {
- const GLFormat& glFormat = GetGLFormat();
+ // TODO(natlee@microsoft.com): test compressed textures are cleared
+ // create temp buffer with clear color to copy to the texture image
+ ASSERT(kTextureRowPitchAlignment % GetFormat().blockByteSize == 0);
+ uint32_t rowPitch =
+ Align((GetSize().width / GetFormat().blockWidth) * GetFormat().blockByteSize,
+ kTextureRowPitchAlignment);
+
+ // Make sure that we are not rounding
+ ASSERT(rowPitch % GetFormat().blockByteSize == 0);
+ ASSERT(GetSize().height % GetFormat().blockHeight == 0);
+
+ dawn_native::BufferDescriptor descriptor;
+ descriptor.size = rowPitch * (GetSize().height / GetFormat().blockHeight);
+ if (descriptor.size > std::numeric_limits<uint32_t>::max()) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+ }
+ descriptor.nextInChain = nullptr;
+ descriptor.usage = dawn::BufferUsage::CopySrc | dawn::BufferUsage::MapWrite;
+            // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
+ Ref<Buffer> srcBuffer = ToBackend(device->CreateBuffer(&descriptor));
+ // Call release here to prevent memory leak since CreateBuffer will up the ref count to
+ // 1, then assigning to Ref<Buffer> ups the ref count to 2. Release will reduce the ref
+            // count and ensure it reaches 0 when the buffer is no longer in use.
+ srcBuffer->Release();
+
+ // Fill the buffer with clear color
+ uint8_t* clearBuffer = nullptr;
+ DAWN_TRY(srcBuffer->MapAtCreation(&clearBuffer));
+ std::fill(reinterpret_cast<uint32_t*>(clearBuffer),
+ reinterpret_cast<uint32_t*>(clearBuffer + descriptor.size), clearColor);
+ srcBuffer->Unmap();
+
+ // Bind buffer and texture, and make the buffer to texture copy
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+ (rowPitch / GetFormat().blockByteSize) * GetFormat().blockWidth);
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
for (GLint level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
- Extent3D mipSize = GetMipLevelPhysicalSize(level);
- gl.ClearTexSubImage(mHandle, level, 0, 0, baseArrayLayer, mipSize.width,
- mipSize.height, layerCount, glFormat.format, glFormat.type,
- nullptr);
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
+ gl.ActiveTexture(GL_TEXTURE0);
+ gl.BindTexture(GetGLTarget(), GetHandle());
+
+ Extent3D size = GetMipLevelPhysicalSize(level);
+ switch (GetDimension()) {
+ case dawn::TextureDimension::e2D:
+ // TODO(natlee@microsoft.com): This will break when layerCount is greater
+ // than 1, because the buffer is only sized for one layer.
+ ASSERT(layerCount == 1);
+ gl.TexSubImage2D(GetGLTarget(), level, 0, 0, size.width, size.height,
+ GetGLFormat().format, GetGLFormat().type, 0);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
}
+ return {};
}
void Texture::EnsureSubresourceContentInitialized(uint32_t baseMipLevel,
@@ -238,11 +308,12 @@ namespace dawn_native { namespace opengl {
}
if (!IsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
layerCount)) {
- ClearTexture(baseMipLevel, levelCount, baseArrayLayer, layerCount);
+ GetDevice()->ConsumedError(ClearTexture(baseMipLevel, levelCount, baseArrayLayer,
+ layerCount, TextureBase::ClearValue::Zero));
if (isLazyClear) {
GetDevice()->IncrementLazyClearCountForTesting();
}
- SetIsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
+ SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
layerCount);
}
}
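For formats that are not renderable, the new ClearTexture path sizes a temporary buffer from the texture dimensions: one aligned row pitch per block row. A worked example of that arithmetic (the 256-byte row pitch alignment value is an assumption here, and Align is assumed to round up to the next multiple):

    // Sketch of the staging-buffer sizing used by the non-renderable clear path.
    #include <cstdint>
    #include <iostream>

    uint32_t AlignUp(uint32_t value, uint32_t alignment) {
        // alignment must be a power of two for this bit trick
        return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        const uint32_t kRowPitchAlignment = 256;   // assumed value
        const uint32_t width = 300, height = 300;  // RGBA8: 4 bytes per texel
        const uint32_t blockWidth = 1, blockHeight = 1, blockByteSize = 4;

        uint32_t rowPitch = AlignUp((width / blockWidth) * blockByteSize, kRowPitchAlignment);
        uint64_t bufferSize = uint64_t(rowPitch) * (height / blockHeight);

        std::cout << "rowPitch = " << rowPitch        // 1200 rounds up to 1280
                  << ", bufferSize = " << bufferSize  // 1280 * 300 = 384000
                  << "\n";
        return 0;
    }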
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
index 40d82e8a91f..b72c4fc2ec5 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
@@ -45,10 +45,11 @@ namespace dawn_native { namespace opengl {
private:
void DestroyImpl() override;
- void ClearTexture(GLint baseMipLevel,
- GLint levelCount,
- GLint baseArrayLayer,
- uint32_t layerCount);
+ MaybeError ClearTexture(GLint baseMipLevel,
+ GLint levelCount,
+ GLint baseArrayLayer,
+ uint32_t layerCount,
+ TextureBase::ClearValue clearValue);
GLuint mHandle;
GLenum mTarget;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
index d86a3bb0afa..dd9341f71ca 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
@@ -39,6 +39,12 @@ namespace dawn_native { namespace vulkan {
MaybeError Adapter::Initialize() {
DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+ if (!mDeviceInfo.maintenance1 &&
+ mDeviceInfo.properties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
+ return DAWN_DEVICE_LOST_ERROR(
+ "Dawn requires Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 in order to support "
+ "viewport flipY");
+ }
InitializeSupportedExtensions();
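The new adapter check compares the device's reported apiVersion with VK_MAKE_VERSION(1, 1, 0). That macro packs major/minor/patch into one uint32_t (major in bits 22-31, minor in bits 12-21, patch in bits 0-11), so a plain integer comparison orders versions correctly. A small sketch of the packing:

    // VkMakeVersion mirrors Vulkan's VK_MAKE_VERSION macro.
    #include <cstdint>
    #include <iostream>

    constexpr uint32_t VkMakeVersion(uint32_t major, uint32_t minor, uint32_t patch) {
        return (major << 22) | (minor << 12) | patch;
    }

    int main() {
        constexpr uint32_t v1_0_0 = VkMakeVersion(1, 0, 0);  // 0x00400000
        constexpr uint32_t v1_1_0 = VkMakeVersion(1, 1, 0);  // 0x00401000
        // A 1.0 device without VK_KHR_maintenance1 fails the adapter check above.
        std::cout << std::boolalpha << (v1_0_0 < v1_1_0) << "\n";  // true
        return 0;
    }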
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
index dff6bbea062..398569d9baf 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
@@ -25,6 +25,8 @@
const char kVulkanLibName[] = "libvulkan.so.1";
#elif DAWN_PLATFORM_WINDOWS
const char kVulkanLibName[] = "vulkan-1.dll";
+#elif DAWN_PLATFORM_FUCHSIA
+const char kVulkanLibName[] = "libvulkan.so";
#else
# error "Unimplemented Vulkan backend platform"
#endif
@@ -136,6 +138,11 @@ namespace dawn_native { namespace vulkan {
}
}
+ if (mGlobalInfo.fuchsiaImagePipeSwapchain) {
+ layersToRequest.push_back(kLayerNameFuchsiaImagePipeSwapchain);
+ usedKnobs.fuchsiaImagePipeSwapchain = true;
+ }
+
// Always request all extensions used to create VkSurfaceKHR objects so that they are
// always available for embedders looking to create VkSurfaceKHR on our VkInstance.
if (mGlobalInfo.macosSurface) {
@@ -174,6 +181,10 @@ namespace dawn_native { namespace vulkan {
extensionsToRequest.push_back(kExtensionNameKhrXlibSurface);
usedKnobs.xlibSurface = true;
}
+ if (mGlobalInfo.fuchsiaImagePipeSurface) {
+ extensionsToRequest.push_back(kExtensionNameFuchsiaImagePipeSurface);
+ usedKnobs.fuchsiaImagePipeSurface = true;
+ }
VkApplicationInfo appInfo;
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
index e2d36ae179c..336d9a646a5 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
@@ -16,6 +16,7 @@
#include "common/BitSetIterator.h"
#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -60,8 +61,17 @@ namespace dawn_native { namespace vulkan {
}
}
- BindGroupLayout::BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor)
- : BindGroupLayoutBase(device, descriptor) {
+ // static
+ ResultOrError<BindGroupLayout*> BindGroupLayout::Create(
+ Device* device,
+ const BindGroupLayoutDescriptor* descriptor) {
+ std::unique_ptr<BindGroupLayout> bgl =
+ std::make_unique<BindGroupLayout>(device, descriptor);
+ DAWN_TRY(bgl->Initialize());
+ return bgl.release();
+ }
+
+ MaybeError BindGroupLayout::Initialize() {
const auto& info = GetBindingInfo();
// Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
@@ -73,7 +83,7 @@ namespace dawn_native { namespace vulkan {
auto& binding = bindings[numBindings];
binding.binding = bindingIndex;
binding.descriptorType =
- VulkanDescriptorType(info.types[bindingIndex], info.dynamic[bindingIndex]);
+ VulkanDescriptorType(info.types[bindingIndex], info.hasDynamicOffset[bindingIndex]);
binding.descriptorCount = 1;
binding.stageFlags = VulkanShaderStageFlags(info.visibilities[bindingIndex]);
binding.pImmutableSamplers = nullptr;
@@ -88,10 +98,10 @@ namespace dawn_native { namespace vulkan {
createInfo.bindingCount = numBindings;
createInfo.pBindings = bindings.data();
- if (device->fn.CreateDescriptorSetLayout(device->GetVkDevice(), &createInfo, nullptr,
- &mHandle) != VK_SUCCESS) {
- ASSERT(false);
- }
+ Device* device = ToBackend(GetDevice());
+ return CheckVkSuccess(device->fn.CreateDescriptorSetLayout(device->GetVkDevice(),
+ &createInfo, nullptr, &mHandle),
+ "CreateDescriptorSetLayout");
}
BindGroupLayout::~BindGroupLayout() {
@@ -146,8 +156,8 @@ namespace dawn_native { namespace vulkan {
if (descriptorTypeIndex[type] == -1) {
descriptorTypeIndex[type] = numSizes;
- result[numSizes].type =
- VulkanDescriptorType(info.types[bindingIndex], info.dynamic[bindingIndex]);
+ result[numSizes].type = VulkanDescriptorType(info.types[bindingIndex],
+ info.hasDynamicOffset[bindingIndex]);
result[numSizes].descriptorCount = 1;
numSizes++;
} else {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
index 37cfbc57b88..12ba2b61cc8 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
@@ -27,7 +27,8 @@ namespace dawn_native { namespace vulkan {
class BindGroupLayout : public BindGroupLayoutBase {
public:
- BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor);
+ static ResultOrError<BindGroupLayout*> Create(Device* device,
+ const BindGroupLayoutDescriptor* descriptor);
~BindGroupLayout();
VkDescriptorSetLayout GetHandle() const;
@@ -37,6 +38,9 @@ namespace dawn_native { namespace vulkan {
PoolSizeSpec ComputePoolSizes(uint32_t* numPoolSizes) const;
private:
+ using BindGroupLayoutBase::BindGroupLayoutBase;
+ MaybeError Initialize();
+
VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
};
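The Vulkan objects in this patch (BindGroupLayout here, BindGroup and Buffer below) move from constructors that ASSERT on Vulkan failures to a static Create() factory that runs a fallible Initialize() and returns a ResultOrError. A stripped-down sketch of that two-phase pattern, using a std::variant stand-in rather than Dawn's ResultOrError:

    // Hedged sketch of the Create()/Initialize() factory pattern.
    #include <iostream>
    #include <memory>
    #include <string>
    #include <variant>

    template <typename T>
    using Result = std::variant<T, std::string>;   // value or error message

    class Widget {
      public:
        // Factory: construct, then run the part that can fail.
        static Result<std::unique_ptr<Widget>> Create(int size) {
            auto widget = std::unique_ptr<Widget>(new Widget());
            if (std::string err = widget->Initialize(size); !err.empty()) {
                return err;                        // propagate instead of asserting
            }
            return std::move(widget);
        }

      private:
        Widget() = default;                        // the constructor can no longer fail
        std::string Initialize(int size) {
            if (size <= 0) return "invalid size";
            mSize = size;
            return {};
        }
        int mSize = 0;
    };

    int main() {
        auto result = Widget::Create(0);
        if (std::holds_alternative<std::string>(result)) {
            std::cout << "error: " << std::get<std::string>(result) << "\n";
        }
        return 0;
    }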
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
index f321c686da4..aa0f5b5d0d2 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
@@ -14,19 +14,28 @@
#include "dawn_native/vulkan/BindGroupVk.h"
+#include "common/BitSetIterator.h"
#include "dawn_native/vulkan/BindGroupLayoutVk.h"
#include "dawn_native/vulkan/BufferVk.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/SamplerVk.h"
#include "dawn_native/vulkan/TextureVk.h"
-
-#include "common/BitSetIterator.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
- BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
- : BindGroupBase(device, descriptor) {
+ // static
+ ResultOrError<BindGroup*> BindGroup::Create(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ std::unique_ptr<BindGroup> group = std::make_unique<BindGroup>(device, descriptor);
+ DAWN_TRY(group->Initialize());
+ return group.release();
+ }
+
+ MaybeError BindGroup::Initialize() {
+ Device* device = ToBackend(GetDevice());
+
// Create a pool to hold our descriptor set.
        // TODO(cwallez@chromium.org): This is horribly inefficient; find a way to do better, for
// example by having one pool per bind group layout instead.
@@ -41,10 +50,9 @@ namespace dawn_native { namespace vulkan {
createInfo.poolSizeCount = numPoolSizes;
createInfo.pPoolSizes = poolSizes.data();
- if (device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo, nullptr, &mPool) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo, nullptr, &mPool),
+ "CreateDescriptorPool"));
// Now do the allocation of one descriptor set, this is very suboptimal too.
VkDescriptorSetLayout vkLayout = ToBackend(GetLayout())->GetHandle();
@@ -56,10 +64,9 @@ namespace dawn_native { namespace vulkan {
allocateInfo.descriptorSetCount = 1;
allocateInfo.pSetLayouts = &vkLayout;
- if (device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo, &mHandle) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo, &mHandle),
+ "AllocateDescriptorSets"));
// Now do a write of a single descriptor set with all possible chained data allocated on the
// stack.
@@ -78,7 +85,7 @@ namespace dawn_native { namespace vulkan {
write.dstArrayElement = 0;
write.descriptorCount = 1;
write.descriptorType = VulkanDescriptorType(layoutInfo.types[bindingIndex],
- layoutInfo.dynamic[bindingIndex]);
+ layoutInfo.hasDynamicOffset[bindingIndex]);
switch (layoutInfo.types[bindingIndex]) {
case dawn::BindingType::UniformBuffer:
@@ -118,6 +125,8 @@ namespace dawn_native { namespace vulkan {
device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0,
nullptr);
+
+ return {};
}
BindGroup::~BindGroup() {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
index 5071796e284..bfa3fead67a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
@@ -25,12 +25,16 @@ namespace dawn_native { namespace vulkan {
class BindGroup : public BindGroupBase {
public:
- BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ static ResultOrError<BindGroup*> Create(Device* device,
+ const BindGroupDescriptor* descriptor);
~BindGroup();
VkDescriptorSet GetHandle() const;
private:
+ using BindGroupBase::BindGroupBase;
+ MaybeError Initialize();
+
VkDescriptorPool mPool = VK_NULL_HANDLE;
VkDescriptorSet mHandle = VK_NULL_HANDLE;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
index 53085b27488..2133457026f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
@@ -16,6 +16,9 @@
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
+#include "dawn_native/vulkan/MemoryResourceAllocatorVk.h"
+#include "dawn_native/vulkan/ResourceMemoryVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
#include <cstring>
@@ -111,8 +114,14 @@ namespace dawn_native { namespace vulkan {
} // namespace
- Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
- : BufferBase(device, descriptor) {
+ // static
+ ResultOrError<Buffer*> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ std::unique_ptr<Buffer> buffer = std::make_unique<Buffer>(device, descriptor);
+ DAWN_TRY(buffer->Initialize());
+ return buffer.release();
+ }
+
+ MaybeError Buffer::Initialize() {
VkBufferCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
createInfo.pNext = nullptr;
@@ -125,26 +134,25 @@ namespace dawn_native { namespace vulkan {
createInfo.queueFamilyIndexCount = 0;
createInfo.pQueueFamilyIndices = 0;
- if (device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &mHandle) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ "vkCreateBuffer"));
VkMemoryRequirements requirements;
device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
bool requestMappable =
(GetUsage() & (dawn::BufferUsage::MapRead | dawn::BufferUsage::MapWrite)) != 0;
- if (!device->GetMemoryAllocator()->Allocate(requirements, requestMappable,
- &mMemoryAllocation)) {
- ASSERT(false);
- }
+ DAWN_TRY_ASSIGN(mMemoryAllocation, device->AllocateMemory(requirements, requestMappable));
- if (device->fn.BindBufferMemory(device->GetVkDevice(), mHandle,
- mMemoryAllocation.GetMemory(),
- mMemoryAllocation.GetMemoryOffset()) != VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.BindBufferMemory(device->GetVkDevice(), mHandle,
+ ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
+ mMemoryAllocation.GetOffset()),
+ "vkBindBufferMemory"));
+
+ return {};
}
Buffer::~Buffer() {
@@ -243,7 +251,7 @@ namespace dawn_native { namespace vulkan {
}
void Buffer::DestroyImpl() {
- ToBackend(GetDevice())->GetMemoryAllocator()->Free(&mMemoryAllocation);
+ ToBackend(GetDevice())->DeallocateMemory(mMemoryAllocation);
if (mHandle != VK_NULL_HANDLE) {
ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
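
The BufferVk.cpp hunk above also shows the other half of the refactor: raw VK_SUCCESS checks followed by ASSERT(false) are replaced by CheckVkSuccess(...) wrapped in DAWN_TRY, so a failing Vulkan call becomes an error the caller can consume. A rough sketch of what such a check boils down to (VkResultLike, CheckSuccess and the message format are simplified assumptions, not Dawn's actual CheckVkSuccess):

    #include <string>
    #include <variant>

    // Illustrative stand-ins: 0 plays the role of VK_SUCCESS, the variant the role of MaybeError.
    using VkResultLike = int;
    using MaybeError = std::variant<std::monostate, std::string>;

    // Turns a failing result code into an error value instead of asserting.
    MaybeError CheckSuccess(VkResultLike result, const char* context) {
        if (result != 0) {
            return std::string(context) + " failed with result " + std::to_string(result);
        }
        return {};
    }

    // Usage mirrors the pattern in the patch:
    //   DAWN_TRY(CheckVkSuccess(device->fn.CreateBuffer(...), "vkCreateBuffer"));
    // where DAWN_TRY early-returns from the surrounding MaybeError/ResultOrError function.
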
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
index 354c39e34e4..210fadccf16 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
@@ -19,6 +19,7 @@
#include "common/SerialQueue.h"
#include "common/vulkan_platform.h"
+#include "dawn_native/ResourceMemoryAllocation.h"
#include "dawn_native/vulkan/MemoryAllocator.h"
namespace dawn_native { namespace vulkan {
@@ -28,7 +29,7 @@ namespace dawn_native { namespace vulkan {
class Buffer : public BufferBase {
public:
- Buffer(Device* device, const BufferDescriptor* descriptor);
+ static ResultOrError<Buffer*> Create(Device* device, const BufferDescriptor* descriptor);
~Buffer();
void OnMapReadCommandSerialFinished(uint32_t mapSerial, const void* data);
@@ -42,6 +43,9 @@ namespace dawn_native { namespace vulkan {
void TransitionUsageNow(CommandRecordingContext* recordingContext, dawn::BufferUsage usage);
private:
+ using BufferBase::BufferBase;
+ MaybeError Initialize();
+
// Dawn API
MaybeError MapReadAsyncImpl(uint32_t serial) override;
MaybeError MapWriteAsyncImpl(uint32_t serial) override;
@@ -52,7 +56,7 @@ namespace dawn_native { namespace vulkan {
MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
VkBuffer mHandle = VK_NULL_HANDLE;
- DeviceMemoryAllocation mMemoryAllocation;
+ ResourceMemoryAllocation mMemoryAllocation;
dawn::BufferUsage mLastUsage = dawn::BufferUsage::None;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
index ab503d779da..0e2d00349fc 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/vulkan/CommandBufferVk.h"
+#include "dawn_native/BindGroupTracker.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
#include "dawn_native/RenderBundle.h"
@@ -28,6 +29,7 @@
#include "dawn_native/vulkan/RenderPipelineVk.h"
#include "dawn_native/vulkan/TextureVk.h"
#include "dawn_native/vulkan/UtilsVulkan.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -89,66 +91,24 @@ namespace dawn_native { namespace vulkan {
return region;
}
- class DescriptorSetTracker {
+ class DescriptorSetTracker : public BindGroupTrackerBase<VkDescriptorSet, true, uint32_t> {
public:
- void OnSetBindGroup(uint32_t index,
- VkDescriptorSet set,
- uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets) {
- mDirtySets.set(index);
- mSets[index] = set;
- mDynamicOffsetCounts[index] = dynamicOffsetCount;
- if (dynamicOffsetCount > 0) {
- // Vulkan backend use uint32_t as dynamic offsets type, it is not correct.
- // Vulkan should use VkDeviceSize. Dawn vulkan backend has to handle this.
- for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
- ASSERT(dynamicOffsets[i] <= std::numeric_limits<uint32_t>::max());
- mDynamicOffsets[index][i] = static_cast<uint32_t>(dynamicOffsets[i]);
- }
- }
- }
-
- void OnPipelineLayoutChange(PipelineLayout* layout) {
- if (layout == mCurrentLayout) {
- return;
- }
-
- if (mCurrentLayout == nullptr) {
- // We're at the beginning of a pass so all bind groups will be set before any
- // draw / dispatch. Still clear the dirty sets to avoid leftover dirty sets
- // from previous passes.
- mDirtySets.reset();
- } else {
- // Bindgroups that are not inherited will be set again before any draw or
- // dispatch. Resetting the bits also makes sure we don't have leftover dirty
- // bindgroups that don't exist in the pipeline layout.
- mDirtySets &= ~layout->InheritedGroupsMask(mCurrentLayout);
- }
- mCurrentLayout = layout;
- }
-
- void Flush(Device* device, VkCommandBuffer commands, VkPipelineBindPoint bindPoint) {
- for (uint32_t dirtyIndex : IterateBitSet(mDirtySets)) {
+ void Apply(Device* device, VkCommandBuffer commands, VkPipelineBindPoint bindPoint) {
+ for (uint32_t dirtyIndex :
+ IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
device->fn.CmdBindDescriptorSets(
- commands, bindPoint, mCurrentLayout->GetHandle(), dirtyIndex, 1,
- &mSets[dirtyIndex], mDynamicOffsetCounts[dirtyIndex],
+ commands, bindPoint, ToBackend(mPipelineLayout)->GetHandle(), dirtyIndex, 1,
+ &mBindGroups[dirtyIndex], mDynamicOffsetCounts[dirtyIndex],
mDynamicOffsetCounts[dirtyIndex] > 0 ? mDynamicOffsets[dirtyIndex].data()
: nullptr);
}
- mDirtySets.reset();
+ DidApply();
}
-
- private:
- PipelineLayout* mCurrentLayout = nullptr;
- std::array<VkDescriptorSet, kMaxBindGroups> mSets;
- std::bitset<kMaxBindGroups> mDirtySets;
- std::array<uint32_t, kMaxBindGroups> mDynamicOffsetCounts;
- std::array<std::array<uint32_t, kMaxBindingsPerGroup>, kMaxBindGroups> mDynamicOffsets;
};
- void RecordBeginRenderPass(CommandRecordingContext* recordingContext,
- Device* device,
- BeginRenderPassCmd* renderPass) {
+ MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
+ Device* device,
+ BeginRenderPassCmd* renderPass) {
VkCommandBuffer commands = recordingContext->commandBuffer;
// Query a VkRenderPass from the cache
@@ -178,14 +138,19 @@ namespace dawn_native { namespace vulkan {
TextureView* resolveView = ToBackend(attachmentInfo.resolveTarget.Get());
ToBackend(resolveView->GetTexture())
->SetIsSubresourceContentInitialized(
- resolveView->GetBaseMipLevel(), resolveView->GetLevelCount(),
+ true, resolveView->GetBaseMipLevel(), resolveView->GetLevelCount(),
resolveView->GetBaseArrayLayer(), resolveView->GetLayerCount());
}
switch (attachmentInfo.storeOp) {
case dawn::StoreOp::Store: {
view->GetTexture()->SetIsSubresourceContentInitialized(
- view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
+ true, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
+ } break;
+
+ case dawn::StoreOp::Clear: {
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ false, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
} break;
default: { UNREACHABLE(); } break;
@@ -197,22 +162,43 @@ namespace dawn_native { namespace vulkan {
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
auto& attachmentInfo = renderPass->depthStencilAttachment;
- query.SetDepthStencil(attachmentInfo.view->GetTexture()->GetFormat().format,
+ TextureView* view = ToBackend(attachmentInfo.view.Get());
+
+ // If the depth stencil texture has not been initialized, we want to use a clear load op
+ // to initialize its contents to 0.
+ if (!view->GetTexture()->IsSubresourceContentInitialized(
+ view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount())) {
+ if (view->GetTexture()->GetFormat().HasDepth() &&
+ attachmentInfo.depthLoadOp == dawn::LoadOp::Load) {
+ attachmentInfo.clearDepth = 0.0f;
+ attachmentInfo.depthLoadOp = dawn::LoadOp::Clear;
+ }
+ if (view->GetTexture()->GetFormat().HasStencil() &&
+ attachmentInfo.stencilLoadOp == dawn::LoadOp::Load) {
+ attachmentInfo.clearStencil = 0u;
+ attachmentInfo.stencilLoadOp = dawn::LoadOp::Clear;
+ }
+ }
+ query.SetDepthStencil(view->GetTexture()->GetFormat().format,
attachmentInfo.depthLoadOp, attachmentInfo.stencilLoadOp);
- if (attachmentInfo.depthLoadOp == dawn::LoadOp::Load ||
- attachmentInfo.stencilLoadOp == dawn::LoadOp::Load) {
- ToBackend(attachmentInfo.view->GetTexture())
- ->EnsureSubresourceContentInitialized(
- recordingContext, attachmentInfo.view->GetBaseMipLevel(),
- attachmentInfo.view->GetLevelCount(),
- attachmentInfo.view->GetBaseArrayLayer(),
- attachmentInfo.view->GetLayerCount());
+
+ if (attachmentInfo.depthStoreOp == dawn::StoreOp::Store &&
+ attachmentInfo.stencilStoreOp == dawn::StoreOp::Store) {
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ true, view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount());
+ } else if (attachmentInfo.depthStoreOp == dawn::StoreOp::Clear &&
+ attachmentInfo.stencilStoreOp == dawn::StoreOp::Clear) {
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ false, view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount());
}
}
query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
- renderPassVK = device->GetRenderPassCache()->GetRenderPass(query);
+ DAWN_TRY_ASSIGN(renderPassVK, device->GetRenderPassCache()->GetRenderPass(query));
}
// Create a framebuffer that will be used once for the render pass and gather the clear
@@ -275,10 +261,10 @@ namespace dawn_native { namespace vulkan {
createInfo.height = renderPass->height;
createInfo.layers = 1;
- if (device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo, nullptr,
- &framebuffer) != VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(
+ CheckVkSuccess(device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo,
+ nullptr, &framebuffer),
+ "CreateFramebuffer"));
// We don't reuse VkFramebuffers so mark the framebuffer for deletion as soon as the
// commands currently being recorded are finished.
@@ -298,9 +284,17 @@ namespace dawn_native { namespace vulkan {
beginInfo.pClearValues = clearValues.data();
device->fn.CmdBeginRenderPass(commands, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
+
+ return {};
}
} // anonymous namespace
+ // static
+ CommandBuffer* CommandBuffer::Create(CommandEncoderBase* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return new CommandBuffer(encoder, descriptor);
+ }
+
CommandBuffer::CommandBuffer(CommandEncoderBase* encoder,
const CommandBufferDescriptor* descriptor)
: CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
@@ -330,12 +324,7 @@ namespace dawn_native { namespace vulkan {
tempBufferDescriptor.usage = dawn::BufferUsage::CopySrc | dawn::BufferUsage::CopyDst;
Device* device = ToBackend(GetDevice());
- Ref<Buffer> tempBuffer = ToBackend(device->CreateBuffer(&tempBufferDescriptor));
- // After device->CreateBuffer(&tempBufferDescriptor) is called, the ref count of the buffer
- // object is 1, and after assigning it to a Ref<Buffer>, the ref count of it will be 2. To
- // prevent memory leak, we must reduce the ref count here to ensure the ref count of this
- // object to be 0 after all the Ref<> objects that contain the buffer object are released.
- tempBuffer->Release();
+ Ref<Buffer> tempBuffer = AcquireRef(ToBackend(device->CreateBuffer(&tempBufferDescriptor)));
BufferCopy tempBufferCopy;
tempBufferCopy.buffer = tempBuffer.Get();
@@ -368,7 +357,7 @@ namespace dawn_native { namespace vulkan {
recordingContext->tempBuffers.emplace_back(tempBuffer);
}
- void CommandBuffer::RecordCommands(CommandRecordingContext* recordingContext) {
+ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingContext) {
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = recordingContext->commandBuffer;
@@ -429,7 +418,7 @@ namespace dawn_native { namespace vulkan {
subresource.mipLevel)) {
// Since texture has been overwritten, it has been "initialized"
dst.texture->SetIsSubresourceContentInitialized(
- subresource.mipLevel, 1, subresource.baseArrayLayer, 1);
+ true, subresource.mipLevel, 1, subresource.baseArrayLayer, 1);
} else {
ToBackend(dst.texture)
->EnsureSubresourceContentInitialized(recordingContext,
@@ -488,7 +477,7 @@ namespace dawn_native { namespace vulkan {
if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
dst.mipLevel)) {
// Since destination texture has been overwritten, it has been "initialized"
- dst.texture->SetIsSubresourceContentInitialized(dst.mipLevel, 1,
+ dst.texture->SetIsSubresourceContentInitialized(true, dst.mipLevel, 1,
dst.arrayLayer, 1);
} else {
ToBackend(dst.texture)
@@ -539,7 +528,7 @@ namespace dawn_native { namespace vulkan {
BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
TransitionForPass(recordingContext, passResourceUsages[nextPassNumber]);
- RecordRenderPass(recordingContext, cmd);
+ DAWN_TRY(RecordRenderPass(recordingContext, cmd));
nextPassNumber++;
} break;
@@ -556,13 +545,15 @@ namespace dawn_native { namespace vulkan {
default: { UNREACHABLE(); } break;
}
}
+
+ return {};
}
void CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext) {
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = recordingContext->commandBuffer;
- DescriptorSetTracker descriptorSets;
+ DescriptorSetTracker descriptorSets = {};
Command type;
while (mCommands.NextCommandId(&type)) {
@@ -574,7 +565,7 @@ namespace dawn_native { namespace vulkan {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- descriptorSets.Flush(device, commands, VK_PIPELINE_BIND_POINT_COMPUTE);
+ descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_COMPUTE);
device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
} break;
@@ -582,7 +573,7 @@ namespace dawn_native { namespace vulkan {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
- descriptorSets.Flush(device, commands, VK_PIPELINE_BIND_POINT_COMPUTE);
+ descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_COMPUTE);
device->fn.CmdDispatchIndirect(
commands, indirectBuffer,
static_cast<VkDeviceSize>(dispatch->indirectOffset));
@@ -606,7 +597,7 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_COMPUTE,
pipeline->GetHandle());
- descriptorSets.OnPipelineLayoutChange(ToBackend(pipeline->GetLayout()));
+ descriptorSets.OnSetPipeline(pipeline);
} break;
case Command::InsertDebugMarker: {
@@ -663,12 +654,13 @@ namespace dawn_native { namespace vulkan {
// EndComputePass should have been called
UNREACHABLE();
}
- void CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingContext,
- BeginRenderPassCmd* renderPassCmd) {
+
+ MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingContext,
+ BeginRenderPassCmd* renderPassCmd) {
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = recordingContext->commandBuffer;
- RecordBeginRenderPass(recordingContext, device, renderPassCmd);
+ DAWN_TRY(RecordBeginRenderPass(recordingContext, device, renderPassCmd));
// Set the default value for the dynamic state
{
@@ -688,9 +680,9 @@ namespace dawn_native { namespace vulkan {
// The viewport and scissor default to cover all of the attachments
VkViewport viewport;
viewport.x = 0.0f;
- viewport.y = 0.0f;
+ viewport.y = static_cast<float>(renderPassCmd->height);
viewport.width = static_cast<float>(renderPassCmd->width);
- viewport.height = static_cast<float>(renderPassCmd->height);
+ viewport.height = -static_cast<float>(renderPassCmd->height);
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
device->fn.CmdSetViewport(commands, 0, 1, &viewport);
@@ -703,7 +695,7 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
}
- DescriptorSetTracker descriptorSets;
+ DescriptorSetTracker descriptorSets = {};
RenderPipeline* lastPipeline = nullptr;
auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
@@ -711,7 +703,7 @@ namespace dawn_native { namespace vulkan {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
- descriptorSets.Flush(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
draw->firstVertex, draw->firstInstance);
} break;
@@ -719,7 +711,7 @@ namespace dawn_native { namespace vulkan {
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
- descriptorSets.Flush(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
draw->firstIndex, draw->baseVertex,
draw->firstInstance);
@@ -729,7 +721,7 @@ namespace dawn_native { namespace vulkan {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
VkBuffer indirectBuffer = ToBackend(draw->indirectBuffer)->GetHandle();
- descriptorSets.Flush(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDrawIndirect(commands, indirectBuffer,
static_cast<VkDeviceSize>(draw->indirectOffset), 1,
0);
@@ -739,7 +731,7 @@ namespace dawn_native { namespace vulkan {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
VkBuffer indirectBuffer = ToBackend(draw->indirectBuffer)->GetHandle();
- descriptorSets.Flush(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDrawIndexedIndirect(
commands, indirectBuffer, static_cast<VkDeviceSize>(draw->indirectOffset),
1, 0);
@@ -825,25 +817,15 @@ namespace dawn_native { namespace vulkan {
pipeline->GetHandle());
lastPipeline = pipeline;
- descriptorSets.OnPipelineLayoutChange(ToBackend(pipeline->GetLayout()));
+ descriptorSets.OnSetPipeline(pipeline);
} break;
- case Command::SetVertexBuffers: {
- SetVertexBuffersCmd* cmd = iter->NextCommand<SetVertexBuffersCmd>();
- auto buffers = iter->NextData<Ref<BufferBase>>(cmd->count);
- auto offsets = iter->NextData<uint64_t>(cmd->count);
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+ VkBuffer buffer = ToBackend(cmd->buffer)->GetHandle();
+ VkDeviceSize offset = static_cast<VkDeviceSize>(cmd->offset);
- std::array<VkBuffer, kMaxVertexBuffers> vkBuffers;
- std::array<VkDeviceSize, kMaxVertexBuffers> vkOffsets;
-
- for (uint32_t i = 0; i < cmd->count; ++i) {
- Buffer* buffer = ToBackend(buffers[i].Get());
- vkBuffers[i] = buffer->GetHandle();
- vkOffsets[i] = static_cast<VkDeviceSize>(offsets[i]);
- }
-
- device->fn.CmdBindVertexBuffers(commands, cmd->startSlot, cmd->count,
- vkBuffers.data(), vkOffsets.data());
+ device->fn.CmdBindVertexBuffers(commands, cmd->slot, 1, &buffer, &offset);
} break;
default:
@@ -858,7 +840,7 @@ namespace dawn_native { namespace vulkan {
case Command::EndRenderPass: {
mCommands.NextCommand<EndRenderPassCmd>();
device->fn.CmdEndRenderPass(commands);
- return;
+ return {};
} break;
case Command::SetBlendColor: {
@@ -882,9 +864,9 @@ namespace dawn_native { namespace vulkan {
SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
VkViewport viewport;
viewport.x = cmd->x;
- viewport.y = cmd->y;
+ viewport.y = cmd->y + cmd->height;
viewport.width = cmd->width;
- viewport.height = cmd->height;
+ viewport.height = -cmd->height;
viewport.minDepth = cmd->minDepth;
viewport.maxDepth = cmd->maxDepth;
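
The viewport changes above flip the Y axis by placing the origin at the bottom of the render target and giving the viewport a negative height; this relies on the VK_KHR_maintenance1 extension that the DeviceVk.cpp hunk further down now requests, and keeps Vulkan's clip space consistent with the other backends. A small self-contained sketch of the computation (the Viewport struct is a stand-in for VkViewport):

    #include <cstdio>

    // Stand-in for VkViewport; the real code fills the Vulkan struct directly.
    struct Viewport {
        float x, y, width, height, minDepth, maxDepth;
    };

    // Flip the viewport vertically: shift the origin down by `height` and negate the height.
    // A negative viewport height is only valid with VK_KHR_maintenance1 (or Vulkan 1.1+).
    Viewport FlippedViewport(float x, float y, float width, float height) {
        return {x, y + height, width, -height, 0.0f, 1.0f};
    }

    int main() {
        Viewport v = FlippedViewport(0.0f, 0.0f, 640.0f, 480.0f);
        std::printf("y=%.1f height=%.1f\n", v.y, v.height);  // prints y=480.0 height=-480.0
        return 0;
    }
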
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
index c6d15c22894..e1b2e5a660a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
@@ -17,6 +17,7 @@
#include "dawn_native/CommandAllocator.h"
#include "dawn_native/CommandBuffer.h"
+#include "dawn_native/Error.h"
#include "common/vulkan_platform.h"
@@ -32,15 +33,18 @@ namespace dawn_native { namespace vulkan {
class CommandBuffer : public CommandBufferBase {
public:
- CommandBuffer(CommandEncoderBase* encoder, const CommandBufferDescriptor* descriptor);
+ static CommandBuffer* Create(CommandEncoderBase* encoder,
+ const CommandBufferDescriptor* descriptor);
~CommandBuffer();
- void RecordCommands(CommandRecordingContext* recordingContext);
+ MaybeError RecordCommands(CommandRecordingContext* recordingContext);
private:
+ CommandBuffer(CommandEncoderBase* encoder, const CommandBufferDescriptor* descriptor);
+
void RecordComputePass(CommandRecordingContext* recordingContext);
- void RecordRenderPass(CommandRecordingContext* recordingContext,
- BeginRenderPassCmd* renderPass);
+ MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
+ BeginRenderPassCmd* renderPass);
void RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
const TextureCopy& srcCopy,
const TextureCopy& dstCopy,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandRecordingContext.h
index 025de69469e..2749fd28416 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandRecordingContext.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandRecordingContext.h
@@ -16,6 +16,8 @@
#include "common/vulkan_platform.h"
+#include "dawn_native/vulkan/BufferVk.h"
+
#include <vector>
namespace dawn_native { namespace vulkan {
@@ -31,6 +33,10 @@ namespace dawn_native { namespace vulkan {
// The internal buffers used in the workaround of texture-to-texture copies with compressed
// formats.
std::vector<Ref<Buffer>> tempBuffers;
+
+ // For Device state tracking only.
+ VkCommandPool commandPool = VK_NULL_HANDLE;
+ bool used = false;
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
index 34375434814..2f37620ded2 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
@@ -18,11 +18,21 @@
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/PipelineLayoutVk.h"
#include "dawn_native/vulkan/ShaderModuleVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
- ComputePipeline::ComputePipeline(Device* device, const ComputePipelineDescriptor* descriptor)
- : ComputePipelineBase(device, descriptor) {
+ // static
+ ResultOrError<ComputePipeline*> ComputePipeline::Create(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor) {
+ std::unique_ptr<ComputePipeline> pipeline =
+ std::make_unique<ComputePipeline>(device, descriptor);
+ DAWN_TRY(pipeline->Initialize(descriptor));
+ return pipeline.release();
+ }
+
+ MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor* descriptor) {
VkComputePipelineCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
createInfo.pNext = nullptr;
@@ -39,10 +49,11 @@ namespace dawn_native { namespace vulkan {
createInfo.stage.pName = descriptor->computeStage.entryPoint;
createInfo.stage.pSpecializationInfo = nullptr;
- if (device->fn.CreateComputePipelines(device->GetVkDevice(), VK_NULL_HANDLE, 1, &createInfo,
- nullptr, &mHandle) != VK_SUCCESS) {
- ASSERT(false);
- }
+ Device* device = ToBackend(GetDevice());
+ return CheckVkSuccess(
+ device->fn.CreateComputePipelines(device->GetVkDevice(), VK_NULL_HANDLE, 1, &createInfo,
+ nullptr, &mHandle),
+ "CreateComputePipeline");
}
ComputePipeline::~ComputePipeline() {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
index d1b589c8c41..ca358478792 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
@@ -18,6 +18,7 @@
#include "dawn_native/ComputePipeline.h"
#include "common/vulkan_platform.h"
+#include "dawn_native/Error.h"
namespace dawn_native { namespace vulkan {
@@ -25,12 +26,16 @@ namespace dawn_native { namespace vulkan {
class ComputePipeline : public ComputePipelineBase {
public:
- ComputePipeline(Device* device, const ComputePipelineDescriptor* descriptor);
+ static ResultOrError<ComputePipeline*> Create(Device* device,
+ const ComputePipelineDescriptor* descriptor);
~ComputePipeline();
VkPipeline GetHandle() const;
private:
+ using ComputePipelineBase::ComputePipelineBase;
+ MaybeError Initialize(const ComputePipelineDescriptor* descriptor);
+
VkPipeline mHandle = VK_NULL_HANDLE;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
index 025f2c71803..15385ab1bad 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
@@ -70,20 +70,27 @@ namespace dawn_native { namespace vulkan {
mMapRequestTracker = std::make_unique<MapRequestTracker>(this);
mMemoryAllocator = std::make_unique<MemoryAllocator>(this);
mRenderPassCache = std::make_unique<RenderPassCache>(this);
+ mResourceAllocator = std::make_unique<MemoryResourceAllocator>(this);
mExternalMemoryService = std::make_unique<external_memory::Service>(this);
mExternalSemaphoreService = std::make_unique<external_semaphore::Service>(this);
+ DAWN_TRY(PrepareRecordingContext());
+
return {};
}
Device::~Device() {
- // Immediately forget about all pending commands so we don't try to submit them in Tick
- FreeCommands(&mPendingCommands);
+ // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+ mRecordingContext.used = false;
+ fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
+
+ VkResult waitIdleResult = fn.QueueWaitIdle(mQueue);
+ // Ignore the result of QueueWaitIdle: it can return OOM (which we can't really do anything
+ // about), device lost (which means workloads running on the GPU are no longer accessible,
+ // so they are as good as waited on), or success.
+ DAWN_UNUSED(waitIdleResult);
- if (fn.QueueWaitIdle(mQueue) != VK_SUCCESS) {
- ASSERT(false);
- }
CheckPassedFences();
// Make sure all fences are complete by explicitly waiting on them all
@@ -109,8 +116,8 @@ namespace dawn_native { namespace vulkan {
Tick();
ASSERT(mCommandsInFlight.Empty());
- for (auto& commands : mUnusedCommands) {
- FreeCommands(&commands);
+ for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
+ fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
}
mUnusedCommands.clear();
@@ -146,52 +153,52 @@ namespace dawn_native { namespace vulkan {
ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
- return new BindGroup(this, descriptor);
+ return BindGroup::Create(this, descriptor);
}
ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) {
- return new BindGroupLayout(this, descriptor);
+ return BindGroupLayout::Create(this, descriptor);
}
ResultOrError<BufferBase*> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return new Buffer(this, descriptor);
+ return Buffer::Create(this, descriptor);
}
CommandBufferBase* Device::CreateCommandBuffer(CommandEncoderBase* encoder,
const CommandBufferDescriptor* descriptor) {
- return new CommandBuffer(encoder, descriptor);
+ return CommandBuffer::Create(encoder, descriptor);
}
ResultOrError<ComputePipelineBase*> Device::CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) {
- return new ComputePipeline(this, descriptor);
+ return ComputePipeline::Create(this, descriptor);
}
ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) {
- return new PipelineLayout(this, descriptor);
+ return PipelineLayout::Create(this, descriptor);
}
ResultOrError<QueueBase*> Device::CreateQueueImpl() {
- return new Queue(this);
+ return Queue::Create(this);
}
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
- return new RenderPipeline(this, descriptor);
+ return RenderPipeline::Create(this, descriptor);
}
ResultOrError<SamplerBase*> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return new Sampler(this, descriptor);
+ return Sampler::Create(this, descriptor);
}
ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor) {
- return new ShaderModule(this, descriptor);
+ return ShaderModule::Create(this, descriptor);
}
ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
- return new SwapChain(this, descriptor);
+ return SwapChain::Create(this, descriptor);
}
ResultOrError<TextureBase*> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return new Texture(this, descriptor);
+ return Texture::Create(this, descriptor);
}
ResultOrError<TextureViewBase*> Device::CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) {
- return new TextureView(texture, descriptor);
+ return TextureView::Create(texture, descriptor);
}
Serial Device::GetCompletedCommandSerial() const {
@@ -206,7 +213,7 @@ namespace dawn_native { namespace vulkan {
return mLastSubmittedSerial + 1;
}
- void Device::TickImpl() {
+ MaybeError Device::TickImpl() {
CheckPassedFences();
RecycleCompletedCommands();
@@ -214,20 +221,22 @@ namespace dawn_native { namespace vulkan {
// Uploader should tick before the resource allocator
// as it enqueues resources to be released.
- mDynamicUploader->Tick(mCompletedSerial);
+ mDynamicUploader->Deallocate(mCompletedSerial);
mMemoryAllocator->Tick(mCompletedSerial);
mDeleter->Tick(mCompletedSerial);
- if (mPendingCommands.pool != VK_NULL_HANDLE) {
- SubmitPendingCommands();
+ if (mRecordingContext.used) {
+ DAWN_TRY(SubmitPendingCommands());
} else if (mCompletedSerial == mLastSubmittedSerial) {
// If there's no GPU work in flight we still need to artificially increment the serial
// so that CPU operations waiting on GPU completion can know they don't have to wait.
mCompletedSerial++;
mLastSubmittedSerial++;
}
+
+ return {};
}
VkInstance Device::GetVkInstance() const {
@@ -265,40 +274,19 @@ namespace dawn_native { namespace vulkan {
return mRenderPassCache.get();
}
- VkCommandBuffer Device::GetPendingCommandBuffer() {
- if (mPendingCommands.pool == VK_NULL_HANDLE) {
- mPendingCommands = GetUnusedCommands();
-
- VkCommandBufferBeginInfo beginInfo;
- beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
- beginInfo.pNext = nullptr;
- beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
- beginInfo.pInheritanceInfo = nullptr;
-
- if (fn.BeginCommandBuffer(mPendingCommands.commandBuffer, &beginInfo) != VK_SUCCESS) {
- ASSERT(false);
- }
- }
-
- return mPendingCommands.commandBuffer;
- }
-
CommandRecordingContext* Device::GetPendingRecordingContext() {
- if (mRecordingContext.commandBuffer == VK_NULL_HANDLE) {
- mRecordingContext.commandBuffer = GetPendingCommandBuffer();
- }
-
+ ASSERT(mRecordingContext.commandBuffer != VK_NULL_HANDLE);
+ mRecordingContext.used = true;
return &mRecordingContext;
}
- void Device::SubmitPendingCommands() {
- if (mPendingCommands.pool == VK_NULL_HANDLE) {
- return;
+ MaybeError Device::SubmitPendingCommands() {
+ if (!mRecordingContext.used) {
+ return {};
}
- if (fn.EndCommandBuffer(mPendingCommands.commandBuffer) != VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(CheckVkSuccess(fn.EndCommandBuffer(mRecordingContext.commandBuffer),
+ "vkEndCommandBuffer"));
std::vector<VkPipelineStageFlags> dstStageMasks(mRecordingContext.waitSemaphores.size(),
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
@@ -311,21 +299,24 @@ namespace dawn_native { namespace vulkan {
submitInfo.pWaitSemaphores = mRecordingContext.waitSemaphores.data();
submitInfo.pWaitDstStageMask = dstStageMasks.data();
submitInfo.commandBufferCount = 1;
- submitInfo.pCommandBuffers = &mPendingCommands.commandBuffer;
+ submitInfo.pCommandBuffers = &mRecordingContext.commandBuffer;
submitInfo.signalSemaphoreCount =
static_cast<uint32_t>(mRecordingContext.signalSemaphores.size());
submitInfo.pSignalSemaphores = mRecordingContext.signalSemaphores.data();
- VkFence fence = GetUnusedFence();
- if (fn.QueueSubmit(mQueue, 1, &submitInfo, fence) != VK_SUCCESS) {
- ASSERT(false);
- }
+ VkFence fence = VK_NULL_HANDLE;
+ DAWN_TRY_ASSIGN(fence, GetUnusedFence());
+ DAWN_TRY(CheckVkSuccess(fn.QueueSubmit(mQueue, 1, &submitInfo, fence), "vkQueueSubmit"));
mLastSubmittedSerial++;
- mCommandsInFlight.Enqueue(mPendingCommands, mLastSubmittedSerial);
- mPendingCommands = CommandPoolAndBuffer();
mFencesInFlight.emplace(fence, mLastSubmittedSerial);
+ CommandPoolAndBuffer submittedCommands = {mRecordingContext.commandPool,
+ mRecordingContext.commandBuffer};
+ mCommandsInFlight.Enqueue(submittedCommands, mLastSubmittedSerial);
+ mRecordingContext = CommandRecordingContext();
+ DAWN_TRY(PrepareRecordingContext());
+
for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
mDeleter->DeleteWhenUnused(semaphore);
}
@@ -334,7 +325,7 @@ namespace dawn_native { namespace vulkan {
mDeleter->DeleteWhenUnused(semaphore);
}
- mRecordingContext = CommandRecordingContext();
+ return {};
}
ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalDevice) {
@@ -357,6 +348,10 @@ namespace dawn_native { namespace vulkan {
extensionsToRequest.push_back(kExtensionNameKhrExternalMemoryFD);
usedKnobs.externalMemoryFD = true;
}
+ if (mDeviceInfo.externalMemoryZirconHandle) {
+ extensionsToRequest.push_back(kExtensionNameFuchsiaExternalMemory);
+ usedKnobs.externalMemoryZirconHandle = true;
+ }
if (mDeviceInfo.externalSemaphore) {
extensionsToRequest.push_back(kExtensionNameKhrExternalSemaphore);
usedKnobs.externalSemaphore = true;
@@ -365,10 +360,18 @@ namespace dawn_native { namespace vulkan {
extensionsToRequest.push_back(kExtensionNameKhrExternalSemaphoreFD);
usedKnobs.externalSemaphoreFD = true;
}
+ if (mDeviceInfo.externalSemaphoreZirconHandle) {
+ extensionsToRequest.push_back(kExtensionNameFuchsiaExternalSemaphore);
+ usedKnobs.externalSemaphoreZirconHandle = true;
+ }
if (mDeviceInfo.swapchain) {
extensionsToRequest.push_back(kExtensionNameKhrSwapchain);
usedKnobs.swapchain = true;
}
+ if (mDeviceInfo.maintenance1) {
+ extensionsToRequest.push_back(kExtensionNameKhrMaintenance1);
+ usedKnobs.maintenance1 = true;
+ }
// Always require independentBlend because it is a core Dawn feature
usedKnobs.features.independentBlend = VK_TRUE;
@@ -447,9 +450,11 @@ namespace dawn_native { namespace vulkan {
return const_cast<VulkanFunctions*>(&fn);
}
- VkFence Device::GetUnusedFence() {
+ ResultOrError<VkFence> Device::GetUnusedFence() {
if (!mUnusedFences.empty()) {
VkFence fence = mUnusedFences.back();
+ DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &fence), "vkResetFences"));
+
mUnusedFences.pop_back();
return fence;
}
@@ -460,9 +465,8 @@ namespace dawn_native { namespace vulkan {
createInfo.flags = 0;
VkFence fence = VK_NULL_HANDLE;
- if (fn.CreateFence(mVkDevice, &createInfo, nullptr, &fence) != VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &fence),
+ "vkCreateFence"));
return fence;
}
@@ -481,11 +485,7 @@ namespace dawn_native { namespace vulkan {
return;
}
- if (fn.ResetFences(mVkDevice, 1, &fence) != VK_SUCCESS) {
- ASSERT(false);
- }
mUnusedFences.push_back(fence);
-
mFencesInFlight.pop();
ASSERT(fenceSerial > mCompletedSerial);
@@ -493,63 +493,66 @@ namespace dawn_native { namespace vulkan {
}
}
- Device::CommandPoolAndBuffer Device::GetUnusedCommands() {
+ MaybeError Device::PrepareRecordingContext() {
+ ASSERT(!mRecordingContext.used);
+ ASSERT(mRecordingContext.commandBuffer == VK_NULL_HANDLE);
+ ASSERT(mRecordingContext.commandPool == VK_NULL_HANDLE);
+
+ // First try to recycle unused command pools.
if (!mUnusedCommands.empty()) {
CommandPoolAndBuffer commands = mUnusedCommands.back();
mUnusedCommands.pop_back();
- return commands;
- }
-
- CommandPoolAndBuffer commands;
-
- VkCommandPoolCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
- createInfo.queueFamilyIndex = mQueueFamily;
-
- if (fn.CreateCommandPool(mVkDevice, &createInfo, nullptr, &commands.pool) != VK_SUCCESS) {
- ASSERT(false);
- }
-
- VkCommandBufferAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.commandPool = commands.pool;
- allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
- allocateInfo.commandBufferCount = 1;
-
- if (fn.AllocateCommandBuffers(mVkDevice, &allocateInfo, &commands.commandBuffer) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
-
- return commands;
+ DAWN_TRY(CheckVkSuccess(fn.ResetCommandPool(mVkDevice, commands.pool, 0),
+ "vkResetCommandPool"));
+
+ mRecordingContext.commandBuffer = commands.commandBuffer;
+ mRecordingContext.commandPool = commands.pool;
+ } else {
+ // Create a new command pool for our commands and allocate the command buffer.
+ VkCommandPoolCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
+ createInfo.queueFamilyIndex = mQueueFamily;
+
+ DAWN_TRY(CheckVkSuccess(fn.CreateCommandPool(mVkDevice, &createInfo, nullptr,
+ &mRecordingContext.commandPool),
+ "vkCreateCommandPool"));
+
+ VkCommandBufferAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ allocateInfo.pNext = nullptr;
+ allocateInfo.commandPool = mRecordingContext.commandPool;
+ allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ allocateInfo.commandBufferCount = 1;
+
+ DAWN_TRY(CheckVkSuccess(fn.AllocateCommandBuffers(mVkDevice, &allocateInfo,
+ &mRecordingContext.commandBuffer),
+ "vkAllocateCommandBuffers"));
+ }
+
+ // Start the recording of commands in the command buffer.
+ VkCommandBufferBeginInfo beginInfo;
+ beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ beginInfo.pNext = nullptr;
+ beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ beginInfo.pInheritanceInfo = nullptr;
+
+ return CheckVkSuccess(fn.BeginCommandBuffer(mRecordingContext.commandBuffer, &beginInfo),
+ "vkBeginCommandBuffer");
}
void Device::RecycleCompletedCommands() {
for (auto& commands : mCommandsInFlight.IterateUpTo(mCompletedSerial)) {
- if (fn.ResetCommandPool(mVkDevice, commands.pool, 0) != VK_SUCCESS) {
- ASSERT(false);
- }
mUnusedCommands.push_back(commands);
}
mCommandsInFlight.ClearUpTo(mCompletedSerial);
}
- void Device::FreeCommands(CommandPoolAndBuffer* commands) {
- if (commands->pool != VK_NULL_HANDLE) {
- fn.DestroyCommandPool(mVkDevice, commands->pool, nullptr);
- commands->pool = VK_NULL_HANDLE;
- }
-
- // Command buffers are implicitly destroyed when the command pool is.
- commands->commandBuffer = VK_NULL_HANDLE;
- }
-
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
std::unique_ptr<StagingBufferBase> stagingBuffer =
std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
return std::move(stagingBuffer);
}
@@ -558,6 +561,8 @@ namespace dawn_native { namespace vulkan {
BufferBase* destination,
uint64_t destinationOffset,
uint64_t size) {
+ CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
// Insert memory barrier to ensure host write operations are made visible before
// copying from the staging buffer. However, this barrier can be removed (see note below).
//
@@ -567,15 +572,15 @@ namespace dawn_native { namespace vulkan {
// Insert pipeline barrier to ensure correct ordering with previous memory operations on the
// buffer.
- ToBackend(destination)
- ->TransitionUsageNow(GetPendingRecordingContext(), dawn::BufferUsage::CopyDst);
+ ToBackend(destination)->TransitionUsageNow(recordingContext, dawn::BufferUsage::CopyDst);
VkBufferCopy copy;
copy.srcOffset = sourceOffset;
copy.dstOffset = destinationOffset;
copy.size = size;
- this->fn.CmdCopyBuffer(GetPendingCommandBuffer(), ToBackend(source)->GetBufferHandle(),
+ this->fn.CmdCopyBuffer(recordingContext->commandBuffer,
+ ToBackend(source)->GetBufferHandle(),
ToBackend(destination)->GetHandle(), 1, &copy);
return {};
@@ -656,9 +661,14 @@ namespace dawn_native { namespace vulkan {
std::vector<VkSemaphore> waitSemaphores;
waitSemaphores.reserve(waitHandles.size());
- // If failed, cleanup
+ // Cleanup in case of a failure: the image creation doesn't acquire the external objects
+ // if a failure happens.
+ Texture* result = nullptr;
if (ConsumedError(ImportExternalImage(descriptor, memoryHandle, waitHandles,
- &signalSemaphore, &allocation, &waitSemaphores))) {
+ &signalSemaphore, &allocation, &waitSemaphores)) ||
+ ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
+ signalSemaphore, allocation, waitSemaphores),
+ &result)) {
// Clear the signal semaphore
fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
@@ -672,7 +682,26 @@ namespace dawn_native { namespace vulkan {
return nullptr;
}
- return new Texture(this, descriptor, textureDescriptor, signalSemaphore, allocation,
- waitSemaphores);
+ return result;
+ }
+
+ ResultOrError<ResourceMemoryAllocation> Device::AllocateMemory(
+ VkMemoryRequirements requirements,
+ bool mappable) {
+ // TODO(crbug.com/dawn/27): Support sub-allocation.
+ ResourceMemoryAllocation allocation;
+ DAWN_TRY_ASSIGN(allocation, mResourceAllocator->Allocate(requirements, mappable));
+ return allocation;
+ }
+
+ void Device::DeallocateMemory(ResourceMemoryAllocation& allocation) {
+ if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+ return;
+ }
+ mResourceAllocator->Deallocate(allocation);
+
+ // Invalidate the underlying resource heap in case the client accidentally
+ // calls DeallocateMemory again using the same allocation.
+ allocation.Invalidate();
}
}} // namespace dawn_native::vulkan
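
With this change the device keeps exactly one recording context alive: PrepareRecordingContext() recycles or creates a command pool and starts recording, SubmitPendingCommands() submits it with a fence tied to the next serial, and TickImpl() later moves completed pools back to the unused list, where they are reset lazily on reuse. A rough standalone sketch of the serial-based recycling (the types are simplified stand-ins, not the real Dawn/Vulkan ones):

    #include <cstdint>
    #include <deque>
    #include <utility>
    #include <vector>

    using Serial = uint64_t;
    // Stand-in for the CommandPoolAndBuffer pair tracked by the device.
    struct CommandPoolAndBuffer { int pool = 0; int commandBuffer = 0; };

    class CommandRecycler {
      public:
        // Called at submit time: the just-submitted commands are tagged with their serial.
        void Enqueue(CommandPoolAndBuffer commands, Serial submittedSerial) {
            mInFlight.emplace_back(commands, submittedSerial);
        }

        // Called from the tick once fences report which serial the GPU has completed.
        // Pools are only moved to the unused list here; resetting happens on reuse.
        void RecycleCompleted(Serial completedSerial, std::vector<CommandPoolAndBuffer>* unused) {
            while (!mInFlight.empty() && mInFlight.front().second <= completedSerial) {
                unused->push_back(mInFlight.front().first);
                mInFlight.pop_front();
            }
        }

      private:
        std::deque<std::pair<CommandPoolAndBuffer, Serial>> mInFlight;
    };
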
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
index c3ff3522e2b..52cf7673340 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
@@ -22,6 +22,7 @@
#include "dawn_native/Device.h"
#include "dawn_native/vulkan/CommandRecordingContext.h"
#include "dawn_native/vulkan/Forward.h"
+#include "dawn_native/vulkan/MemoryResourceAllocatorVk.h"
#include "dawn_native/vulkan/VulkanFunctions.h"
#include "dawn_native/vulkan/VulkanInfo.h"
@@ -63,10 +64,9 @@ namespace dawn_native { namespace vulkan {
MemoryAllocator* GetMemoryAllocator() const;
RenderPassCache* GetRenderPassCache() const;
- VkCommandBuffer GetPendingCommandBuffer();
CommandRecordingContext* GetPendingRecordingContext();
Serial GetPendingCommandSerial() const override;
- void SubmitPendingCommands();
+ MaybeError SubmitPendingCommands();
TextureBase* CreateTextureWrappingVulkanImage(
const ExternalImageDescriptor* descriptor,
@@ -82,7 +82,7 @@ namespace dawn_native { namespace vulkan {
Serial GetCompletedCommandSerial() const final override;
Serial GetLastSubmittedCommandSerial() const final override;
- void TickImpl() override;
+ MaybeError TickImpl() override;
ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
@@ -91,6 +91,11 @@ namespace dawn_native { namespace vulkan {
uint64_t destinationOffset,
uint64_t size) override;
+ ResultOrError<ResourceMemoryAllocation> AllocateMemory(VkMemoryRequirements requirements,
+ bool mappable);
+
+ void DeallocateMemory(ResourceMemoryAllocation& allocation);
+
private:
ResultOrError<BindGroupBase*> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
@@ -128,6 +133,8 @@ namespace dawn_native { namespace vulkan {
uint32_t mQueueFamily = 0;
VkQueue mQueue = VK_NULL_HANDLE;
+ std::unique_ptr<MemoryResourceAllocator> mResourceAllocator;
+
std::unique_ptr<FencedDeleter> mDeleter;
std::unique_ptr<MapRequestTracker> mMapRequestTracker;
std::unique_ptr<MemoryAllocator> mMemoryAllocator;
@@ -136,7 +143,7 @@ namespace dawn_native { namespace vulkan {
std::unique_ptr<external_memory::Service> mExternalMemoryService;
std::unique_ptr<external_semaphore::Service> mExternalSemaphoreService;
- VkFence GetUnusedFence();
+ ResultOrError<VkFence> GetUnusedFence();
void CheckPassedFences();
// We track which operations are in flight on the GPU with an increasing serial.
@@ -144,22 +151,22 @@ namespace dawn_native { namespace vulkan {
// to a serial and a fence, such that when the fence is "ready" we know the operations
// have finished.
std::queue<std::pair<VkFence, Serial>> mFencesInFlight;
+ // Fences in the unused list aren't reset yet.
std::vector<VkFence> mUnusedFences;
Serial mCompletedSerial = 0;
Serial mLastSubmittedSerial = 0;
+ MaybeError PrepareRecordingContext();
+ void RecycleCompletedCommands();
+
struct CommandPoolAndBuffer {
VkCommandPool pool = VK_NULL_HANDLE;
VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
};
-
- CommandPoolAndBuffer GetUnusedCommands();
- void RecycleCompletedCommands();
- void FreeCommands(CommandPoolAndBuffer* commands);
-
SerialQueue<CommandPoolAndBuffer> mCommandsInFlight;
+ // Command pools in the unused list haven't been reset yet.
std::vector<CommandPoolAndBuffer> mUnusedCommands;
- CommandPoolAndBuffer mPendingCommands;
+ // There is always a valid recording context stored in mRecordingContext
CommandRecordingContext mRecordingContext;
MaybeError ImportExternalImage(const ExternalImageDescriptor* descriptor,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ExternalHandle.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ExternalHandle.h
index 37a2e21d1e8..45206b36f6d 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ExternalHandle.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ExternalHandle.h
@@ -1,13 +1,20 @@
#ifndef DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
#define DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
+#include "common/vulkan_platform.h"
+
namespace dawn_native { namespace vulkan {
-#ifdef DAWN_PLATFORM_LINUX
+#if DAWN_PLATFORM_LINUX
// File descriptor
using ExternalMemoryHandle = int;
// File descriptor
using ExternalSemaphoreHandle = int;
+#elif DAWN_PLATFORM_FUCHSIA
+ // Really a Zircon vmo handle.
+ using ExternalMemoryHandle = zx_handle_t;
+ // Really a Zircon event handle.
+ using ExternalSemaphoreHandle = zx_handle_t;
#else
// Generic types so that the Null service can compile, not used for real handles
using ExternalMemoryHandle = void*;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h b/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
index 344678a20c2..4dd1c246f9e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
@@ -29,6 +29,7 @@ namespace dawn_native { namespace vulkan {
class PipelineLayout;
class Queue;
class RenderPipeline;
+ class ResourceMemory;
class Sampler;
class ShaderModule;
class StagingBuffer;
@@ -47,6 +48,7 @@ namespace dawn_native { namespace vulkan {
using PipelineLayoutType = PipelineLayout;
using QueueType = Queue;
using RenderPipelineType = RenderPipeline;
+ using ResourceHeapType = ResourceMemory;
using SamplerType = Sampler;
using ShaderModuleType = ShaderModule;
using StagingBufferType = StagingBuffer;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.cpp
index abd53da6f16..c977bde06cb 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.cpp
@@ -98,12 +98,7 @@ namespace dawn_native { namespace vulkan {
bool mappable,
DeviceMemoryAllocation* allocation) {
int bestType = FindBestTypeIndex(requirements, mappable);
-
- // TODO(cwallez@chromium.org): I think the Vulkan spec guarantees this should never happen
- if (bestType == -1) {
- ASSERT(false);
- return false;
- }
+ ASSERT(bestType >= 0);
VkMemoryAllocateInfo allocateInfo;
allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.cpp
new file mode 100644
index 00000000000..c86e6a1b782
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.cpp
@@ -0,0 +1,119 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/FencedDeleter.h"
+#include "dawn_native/vulkan/ResourceMemoryVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
+
+namespace dawn_native { namespace vulkan {
+
+ MemoryResourceAllocator::MemoryResourceAllocator(Device* device) : mDevice(device) {
+ }
+
+ int MemoryResourceAllocator::FindBestTypeIndex(VkMemoryRequirements requirements,
+ bool mappable) {
+ const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+
+ // Find a suitable memory type for this allocation
+ int bestType = -1;
+ for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
+ // Resource must support this memory type
+ if ((requirements.memoryTypeBits & (1 << i)) == 0) {
+ continue;
+ }
+
+ // Mappable resource must be host visible
+ if (mappable &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+ continue;
+ }
+
+ // Mappable must also be host coherent.
+ if (mappable &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
+ continue;
+ }
+
+ // Found the first candidate memory type
+ if (bestType == -1) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
+
+ // For non-mappable resources, favor device local memory.
+ if (!mappable) {
+ if ((info.memoryTypes[bestType].propertyFlags &
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) == 0 &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) !=
+ 0) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
+ }
+
+ // All things equal favor the memory in the biggest heap
+ VkDeviceSize bestTypeHeapSize =
+ info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
+ VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
+ if (candidateHeapSize > bestTypeHeapSize) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
+ }
+
+ return bestType;
+ }
+
+ ResultOrError<ResourceMemoryAllocation> MemoryResourceAllocator::Allocate(
+ VkMemoryRequirements requirements,
+ bool mappable) {
+ int bestType = FindBestTypeIndex(requirements, mappable);
+
+ // TODO(cwallez@chromium.org): I think the Vulkan spec guarantees this should never
+ // happen
+ if (bestType == -1) {
+ return DAWN_DEVICE_LOST_ERROR("Unable to find memory for requirements.");
+ }
+
+ VkMemoryAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ allocateInfo.pNext = nullptr;
+ allocateInfo.allocationSize = requirements.size;
+ allocateInfo.memoryTypeIndex = static_cast<uint32_t>(bestType);
+
+ VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+ nullptr, &allocatedMemory),
+ "vkAllocateMemory"));
+
+ void* mappedPointer = nullptr;
+ if (mappable) {
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(), allocatedMemory,
+ 0, requirements.size, 0, &mappedPointer),
+ "vkMapMemory"));
+ }
+
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+
+ return ResourceMemoryAllocation(info, /*offset*/ 0, new ResourceMemory(allocatedMemory),
+ static_cast<uint8_t*>(mappedPointer));
+ }
+
+ void MemoryResourceAllocator::Deallocate(ResourceMemoryAllocation& allocation) {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(
+ ToBackend(allocation.GetResourceHeap())->GetMemory());
+ }
+}} // namespace dawn_native::vulkan
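
FindBestTypeIndex() above walks the device's memory types, skipping any type whose bit is not set in VkMemoryRequirements::memoryTypeBits and any type that lacks the host-visible and host-coherent flags when the allocation must be mappable. A tiny illustration of how that bitmask is read (the mask value here is a made-up example):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical mask: bit i set means memory type i is compatible with the resource.
        uint32_t memoryTypeBits = 0b1010;  // types 1 and 3 are candidates
        for (uint32_t i = 0; i < 32; ++i) {
            if ((memoryTypeBits & (1u << i)) != 0) {
                std::printf("memory type %u is a candidate\n", i);
            }
        }
        return 0;
    }
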
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.h
new file mode 100644
index 00000000000..b26d12a50b9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.h
@@ -0,0 +1,43 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_MEMORYRESOURCEALLOCATORVK_H_
+#define DAWNNATIVE_VULKAN_MEMORYRESOURCEALLOCATORVK_H_
+
+#include "common/vulkan_platform.h"
+#include "dawn_native/Error.h"
+#include "dawn_native/ResourceMemoryAllocation.h"
+
+namespace dawn_native { namespace vulkan {
+
+ class Device;
+
+ class MemoryResourceAllocator {
+ public:
+ MemoryResourceAllocator(Device* device);
+ ~MemoryResourceAllocator() = default;
+
+ ResultOrError<ResourceMemoryAllocation> Allocate(VkMemoryRequirements requirements,
+ bool mappable);
+ void Deallocate(ResourceMemoryAllocation& allocation);
+
+ private:
+ int FindBestTypeIndex(VkMemoryRequirements requirements, bool mappable);
+
+ Device* mDevice;
+ };
+
+}} // namespace dawn_native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_MEMORYRESOURCEALLOCATORVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
index 858d478461f..bd7e499da60 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
@@ -24,8 +24,30 @@ namespace dawn_native { namespace vulkan {
namespace {
+ bool chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes,
+ bool turnOffVsync,
+ VkPresentModeKHR* presentMode) {
+ if (turnOffVsync) {
+ for (const auto& availablePresentMode : availablePresentModes) {
+ if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
+ *presentMode = availablePresentMode;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ *presentMode = VK_PRESENT_MODE_FIFO_KHR;
+ return true;
+ }
+
bool ChooseSurfaceConfig(const VulkanSurfaceInfo& info,
- NativeSwapChainImpl::ChosenConfig* config) {
+ NativeSwapChainImpl::ChosenConfig* config,
+ bool turnOffVsync) {
+ VkPresentModeKHR presentMode;
+ if (!chooseSwapPresentMode(info.presentModes, turnOffVsync, &presentMode)) {
+ return false;
+ }
// TODO(cwallez@chromium.org): For now this is hardcoded to what works with one NVIDIA
// driver. Need to generalize
config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
@@ -35,11 +57,11 @@ namespace dawn_native { namespace vulkan {
// TODO(cwallez@chromium.org): This is upside down compared to what we want, at least
// on Linux
config->preTransform = info.capabilities.currentTransform;
- config->presentMode = VK_PRESENT_MODE_FIFO_KHR;
+ config->presentMode = presentMode;
config->compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+
return true;
}
-
} // anonymous namespace
NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
@@ -63,7 +85,7 @@ namespace dawn_native { namespace vulkan {
ASSERT(false);
}
- if (!ChooseSurfaceConfig(mInfo, &mConfig)) {
+ if (!ChooseSurfaceConfig(mInfo, &mConfig, mDevice->IsToggleEnabled(Toggle::TurnOffVsync))) {
ASSERT(false);
}
}
@@ -132,7 +154,7 @@ namespace dawn_native { namespace vulkan {
// Do the initial layout transition for all these images from an undefined layout to
// present so that it matches the "present" usage after the first GetNextTexture.
- VkCommandBuffer commands = mDevice->GetPendingCommandBuffer();
+ CommandRecordingContext* recordingContext = mDevice->GetPendingRecordingContext();
for (VkImage image : mSwapChainImages) {
VkImageMemoryBarrier barrier;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
@@ -150,9 +172,9 @@ namespace dawn_native { namespace vulkan {
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
- mDevice->fn.CmdPipelineBarrier(commands, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
- VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
- nullptr, 1, &barrier);
+ mDevice->fn.CmdPipelineBarrier(
+ recordingContext->commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
if (oldSwapchain != VK_NULL_HANDLE) {
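
For reference, the present-mode selection added above reduces to the following standalone sketch (only <vulkan/vulkan.h> assumed). FIFO support is guaranteed by the Vulkan spec, so the vsync-on path cannot fail; the vsync-off path fails when the surface does not offer IMMEDIATE:

#include <vulkan/vulkan.h>
#include <vector>

bool ChoosePresentMode(const std::vector<VkPresentModeKHR>& availableModes,
                       bool turnOffVsync,
                       VkPresentModeKHR* presentMode) {
    if (turnOffVsync) {
        // Prefer IMMEDIATE (no vsync); report failure if the surface lacks it.
        for (VkPresentModeKHR mode : availableModes) {
            if (mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
                *presentMode = mode;
                return true;
            }
        }
        return false;
    }
    // FIFO (vsync on) is always supported.
    *presentMode = VK_PRESENT_MODE_FIFO_KHR;
    return true;
}
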
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
index 4b5615ef3be..dd123af35ed 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
@@ -14,16 +14,25 @@
#include "dawn_native/vulkan/PipelineLayoutVk.h"
+#include "common/BitSetIterator.h"
#include "dawn_native/vulkan/BindGroupLayoutVk.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
-
-#include "common/BitSetIterator.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
- PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
- : PipelineLayoutBase(device, descriptor) {
+ // static
+ ResultOrError<PipelineLayout*> PipelineLayout::Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor) {
+ std::unique_ptr<PipelineLayout> layout =
+ std::make_unique<PipelineLayout>(device, descriptor);
+ DAWN_TRY(layout->Initialize());
+ return layout.release();
+ }
+
+ MaybeError PipelineLayout::Initialize() {
// Compute the array of VkDescriptorSetLayouts that will be chained in the create info.
// TODO(cwallez@chromium.org) Vulkan doesn't allow holes in this array, should we expose
        // this constraint at the Dawn level?
@@ -43,10 +52,10 @@ namespace dawn_native { namespace vulkan {
createInfo.pushConstantRangeCount = 0;
createInfo.pPushConstantRanges = nullptr;
- if (device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr,
- &mHandle) != VK_SUCCESS) {
- ASSERT(false);
- }
+ Device* device = ToBackend(GetDevice());
+ return CheckVkSuccess(
+ device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ "CreatePipelineLayout");
}
PipelineLayout::~PipelineLayout() {
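
The CheckVkSuccess calls introduced in this file (and throughout the rest of the patch) turn a VkResult into an error that DAWN_TRY can propagate instead of asserting. As a rough sketch of that shape only, using a hypothetical optional-based error type rather than Dawn's MaybeError:

#include <vulkan/vulkan.h>
#include <optional>
#include <string>

// nullopt means success; a string is an error message for the caller to propagate.
std::optional<std::string> CheckSuccess(VkResult result, const char* context) {
    if (result == VK_SUCCESS) {
        return std::nullopt;
    }
    return std::string(context) + " failed with VkResult " +
           std::to_string(static_cast<int>(result));
}
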
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h
index a5072eb5722..744eb847912 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h
@@ -18,6 +18,7 @@
#include "dawn_native/PipelineLayout.h"
#include "common/vulkan_platform.h"
+#include "dawn_native/Error.h"
namespace dawn_native { namespace vulkan {
@@ -25,12 +26,16 @@ namespace dawn_native { namespace vulkan {
class PipelineLayout : public PipelineLayoutBase {
public:
- PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+ static ResultOrError<PipelineLayout*> Create(Device* device,
+ const PipelineLayoutDescriptor* descriptor);
~PipelineLayout();
VkPipelineLayout GetHandle() const;
private:
+ using PipelineLayoutBase::PipelineLayoutBase;
+ MaybeError Initialize();
+
VkPipelineLayout mHandle = VK_NULL_HANDLE;
};
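
This header now follows the backend-wide two-phase pattern: a public static Create() plus a private, fallible Initialize(), with the trivial constructor inherited from the base class. A generic sketch of the shape, using a hypothetical std::optional-based error type instead of Dawn's ResultOrError/MaybeError:

#include <memory>
#include <optional>
#include <string>

struct Error { std::string message; };  // stand-in error type, not Dawn's

class Base {
  public:
    explicit Base(int config) : mConfig(config) {}
  protected:
    int mConfig;
};

class Derived : public Base {
  public:
    // All fallible work happens in Initialize(); callers never see a
    // half-constructed object, and a failure destroys it before it escapes.
    static std::optional<Error> Create(int config, Derived** out) {
        std::unique_ptr<Derived> obj(new Derived(config));
        if (std::optional<Error> err = obj->Initialize()) {
            return err;
        }
        *out = obj.release();
        return std::nullopt;
    }

  private:
    using Base::Base;  // inherit the trivial constructor, as the patch does

    std::optional<Error> Initialize() {
        if (mConfig < 0) {
            return Error{"invalid configuration"};
        }
        return std::nullopt;
    }
};

In the real header, PipelineLayout::Create() and the private Initialize() play these roles, with DAWN_TRY providing the early return on failure.
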
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
index c268bcd3d48..558927c52a7 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
@@ -20,23 +20,27 @@
namespace dawn_native { namespace vulkan {
- Queue::Queue(Device* device) : QueueBase(device) {
+ // static
+ Queue* Queue::Create(Device* device) {
+ return new Queue(device);
}
Queue::~Queue() {
}
- void Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
Device* device = ToBackend(GetDevice());
device->Tick();
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
for (uint32_t i = 0; i < commandCount; ++i) {
- ToBackend(commands[i])->RecordCommands(recordingContext);
+ DAWN_TRY(ToBackend(commands[i])->RecordCommands(recordingContext));
}
- device->SubmitPendingCommands();
+ DAWN_TRY(device->SubmitPendingCommands());
+
+ return {};
}
}} // namespace dawn_native::vulkan
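
SubmitImpl now returns MaybeError and relies on DAWN_TRY to stop at the first failing command buffer. The macro's definition is not part of this patch; purely as an illustration of the control flow, a simplified TRY over a hypothetical optional-based error type:

#include <optional>
#include <string>

using MaybeErr = std::optional<std::string>;  // nullopt == success (assumption, not Dawn's type)

// Early-return the error from the enclosing function instead of asserting.
#define TRY(expr)                 \
    do {                          \
        if (auto err_ = (expr)) { \
            return err_;          \
        }                         \
    } while (0)

MaybeErr RecordOne(int commandIndex) {
    return commandIndex < 0 ? MaybeErr("negative command index") : std::nullopt;
}

MaybeErr SubmitAll(int commandCount) {
    for (int i = 0; i < commandCount; ++i) {
        TRY(RecordOne(i));  // stops recording at the first error
    }
    return std::nullopt;
}
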
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h
index 2477c5ae46c..39e8314a25e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h
@@ -24,11 +24,13 @@ namespace dawn_native { namespace vulkan {
class Queue : public QueueBase {
public:
- Queue(Device* device);
+ static Queue* Create(Device* device);
~Queue();
private:
- void SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ using QueueBase::QueueBase;
+
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
index daeb5a43ab1..e6c79f1152c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
@@ -18,6 +18,7 @@
#include "common/HashUtils.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/TextureVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -71,18 +72,19 @@ namespace dawn_native { namespace vulkan {
mCache.clear();
}
- VkRenderPass RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
+ ResultOrError<VkRenderPass> RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
auto it = mCache.find(query);
if (it != mCache.end()) {
- return it->second;
+ return VkRenderPass(it->second);
}
- VkRenderPass renderPass = CreateRenderPassForQuery(query);
+ VkRenderPass renderPass;
+ DAWN_TRY_ASSIGN(renderPass, CreateRenderPassForQuery(query));
mCache.emplace(query, renderPass);
return renderPass;
}
- VkRenderPass RenderPassCache::CreateRenderPassForQuery(
+ ResultOrError<VkRenderPass> RenderPassCache::CreateRenderPassForQuery(
const RenderPassCacheQuery& query) const {
// The Vulkan subpasses want to know the layout of the attachments with VkAttachmentRef.
// Precompute them as they must be pointer-chained in VkSubpassDescription
@@ -189,11 +191,9 @@ namespace dawn_native { namespace vulkan {
// Create the render pass from the zillion parameters
VkRenderPass renderPass;
- if (mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo, nullptr,
- &renderPass) != VK_SUCCESS) {
- ASSERT(false);
- }
-
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo, nullptr, &renderPass),
+ "CreateRenderPass"));
return renderPass;
}
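
GetRenderPass now has to surface creation failures and must not cache them. Reduced to standard containers and illustrative names (not Dawn's), the lookup-create-insert flow is:

#include <optional>
#include <string>
#include <unordered_map>

// Stands in for CreateRenderPassForQuery: creation that can fail.
std::optional<int> CreateExpensive(const std::string& key) {
    if (key.empty()) {
        return std::nullopt;
    }
    return static_cast<int>(key.size());
}

class Cache {
  public:
    // Returns the cached or newly created value, or nullopt on failure.
    std::optional<int> Get(const std::string& key) {
        auto it = mCache.find(key);
        if (it != mCache.end()) {
            return it->second;  // cache hit
        }
        std::optional<int> value = CreateExpensive(key);
        if (!value) {
            return std::nullopt;  // do not cache failures
        }
        mCache.emplace(key, *value);
        return value;
    }

  private:
    std::unordered_map<std::string, int> mCache;
};
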
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
index 8410cea0fa0..3a4eeee6e1b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
@@ -15,9 +15,9 @@
#ifndef DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
#define DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
-#include "common/vulkan_platform.h"
-
#include "common/Constants.h"
+#include "common/vulkan_platform.h"
+#include "dawn_native/Error.h"
#include "dawn_native/dawn_platform.h"
#include <array>
@@ -66,11 +66,12 @@ namespace dawn_native { namespace vulkan {
RenderPassCache(Device* device);
~RenderPassCache();
- VkRenderPass GetRenderPass(const RenderPassCacheQuery& query);
+ ResultOrError<VkRenderPass> GetRenderPass(const RenderPassCacheQuery& query);
private:
// Does the actual VkRenderPass creation on a cache miss.
- VkRenderPass CreateRenderPassForQuery(const RenderPassCacheQuery& query) const;
+ ResultOrError<VkRenderPass> CreateRenderPassForQuery(
+ const RenderPassCacheQuery& query) const;
        // Implements the functors necessary to use RenderPassCacheQueries as unordered_map
// keys.
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
index 8b8f5bf2666..2e84df02de6 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
@@ -21,6 +21,7 @@
#include "dawn_native/vulkan/ShaderModuleVk.h"
#include "dawn_native/vulkan/TextureVk.h"
#include "dawn_native/vulkan/UtilsVulkan.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -207,7 +208,8 @@ namespace dawn_native { namespace vulkan {
}
}
- VkColorComponentFlagBits VulkanColorWriteMask(dawn::ColorWriteMask mask) {
+ VkColorComponentFlags VulkanColorWriteMask(dawn::ColorWriteMask mask,
+ bool isDeclaredInFragmentShader) {
// Vulkan and Dawn color write masks match, static assert it and return the mask
static_assert(static_cast<VkColorComponentFlagBits>(dawn::ColorWriteMask::Red) ==
VK_COLOR_COMPONENT_R_BIT,
@@ -222,11 +224,16 @@ namespace dawn_native { namespace vulkan {
VK_COLOR_COMPONENT_A_BIT,
"");
- return static_cast<VkColorComponentFlagBits>(mask);
+            // According to the Vulkan spec (Chapter 14.3): "The input values to blending or
+            // color attachment writes are undefined for components which do not correspond to a
+            // fragment shader output", we set the color write mask to 0 to prevent such undefined
+            // values from being written into the color attachments.
+ return isDeclaredInFragmentShader ? static_cast<VkColorComponentFlags>(mask)
+ : static_cast<VkColorComponentFlags>(0);
}
- VkPipelineColorBlendAttachmentState ComputeColorDesc(
- const ColorStateDescriptor* descriptor) {
+ VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorStateDescriptor* descriptor,
+ bool isDeclaredInFragmentShader) {
VkPipelineColorBlendAttachmentState attachment;
attachment.blendEnable = BlendEnabled(descriptor) ? VK_TRUE : VK_FALSE;
attachment.srcColorBlendFactor = VulkanBlendFactor(descriptor->colorBlend.srcFactor);
@@ -235,7 +242,8 @@ namespace dawn_native { namespace vulkan {
attachment.srcAlphaBlendFactor = VulkanBlendFactor(descriptor->alphaBlend.srcFactor);
attachment.dstAlphaBlendFactor = VulkanBlendFactor(descriptor->alphaBlend.dstFactor);
attachment.alphaBlendOp = VulkanBlendOperation(descriptor->alphaBlend.operation);
- attachment.colorWriteMask = VulkanColorWriteMask(descriptor->writeMask);
+ attachment.colorWriteMask =
+ VulkanColorWriteMask(descriptor->writeMask, isDeclaredInFragmentShader);
return attachment;
}
@@ -311,8 +319,19 @@ namespace dawn_native { namespace vulkan {
} // anonymous namespace
- RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
- : RenderPipelineBase(device, descriptor) {
+ // static
+ ResultOrError<RenderPipeline*> RenderPipeline::Create(
+ Device* device,
+ const RenderPipelineDescriptor* descriptor) {
+ std::unique_ptr<RenderPipeline> pipeline =
+ std::make_unique<RenderPipeline>(device, descriptor);
+ DAWN_TRY(pipeline->Initialize(descriptor));
+ return pipeline.release();
+ }
+
+ MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor* descriptor) {
+ Device* device = ToBackend(GetDevice());
+
VkPipelineShaderStageCreateInfo shaderStages[2];
{
shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
@@ -400,9 +419,13 @@ namespace dawn_native { namespace vulkan {
// Initialize the "blend state info" that will be chained in the "create info" from the data
// pre-computed in the ColorState
std::array<VkPipelineColorBlendAttachmentState, kMaxColorAttachments> colorBlendAttachments;
+ const ShaderModuleBase::FragmentOutputBaseTypes& fragmentOutputBaseTypes =
+ descriptor->fragmentStage->module->GetFragmentOutputBaseTypes();
for (uint32_t i : IterateBitSet(GetColorAttachmentsMask())) {
- const ColorStateDescriptor* descriptor = GetColorStateDescriptor(i);
- colorBlendAttachments[i] = ComputeColorDesc(descriptor);
+ const ColorStateDescriptor* colorStateDescriptor = GetColorStateDescriptor(i);
+ bool isDeclaredInFragmentShader = fragmentOutputBaseTypes[i] != Format::Other;
+ colorBlendAttachments[i] =
+ ComputeColorDesc(colorStateDescriptor, isDeclaredInFragmentShader);
}
VkPipelineColorBlendStateCreateInfo colorBlend;
colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
@@ -451,7 +474,7 @@ namespace dawn_native { namespace vulkan {
query.SetSampleCount(GetSampleCount());
- renderPass = device->GetRenderPassCache()->GetRenderPass(query);
+ DAWN_TRY_ASSIGN(renderPass, device->GetRenderPassCache()->GetRenderPass(query));
}
// The create info chains in a bunch of things created on the stack here or inside state
@@ -477,10 +500,10 @@ namespace dawn_native { namespace vulkan {
createInfo.basePipelineHandle = VK_NULL_HANDLE;
createInfo.basePipelineIndex = -1;
- if (device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VK_NULL_HANDLE, 1,
- &createInfo, nullptr, &mHandle) != VK_SUCCESS) {
- ASSERT(false);
- }
+ return CheckVkSuccess(
+ device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VK_NULL_HANDLE, 1,
+ &createInfo, nullptr, &mHandle),
+ "CreateGraphicsPipeline");
}
VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
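
The heart of the color-write-mask change is small: when the fragment shader does not declare an output for a color attachment, the blending inputs for that attachment are undefined, so all writes to it are masked off. Restated on its own (only <vulkan/vulkan.h> assumed):

#include <vulkan/vulkan.h>

VkColorComponentFlags EffectiveColorWriteMask(VkColorComponentFlags requestedMask,
                                              bool isDeclaredInFragmentShader) {
    // Masking to 0 keeps undefined values out of the attachment.
    return isDeclaredInFragmentShader ? requestedMask
                                      : static_cast<VkColorComponentFlags>(0);
}
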
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
index 083c3abb99b..9d2d300dd5a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
@@ -18,6 +18,7 @@
#include "dawn_native/RenderPipeline.h"
#include "common/vulkan_platform.h"
+#include "dawn_native/Error.h"
namespace dawn_native { namespace vulkan {
@@ -25,12 +26,16 @@ namespace dawn_native { namespace vulkan {
class RenderPipeline : public RenderPipelineBase {
public:
- RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
+ static ResultOrError<RenderPipeline*> Create(Device* device,
+ const RenderPipelineDescriptor* descriptor);
~RenderPipeline();
VkPipeline GetHandle() const;
private:
+ using RenderPipelineBase::RenderPipelineBase;
+ MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
+
VkPipelineVertexInputStateCreateInfo ComputeVertexInputDesc(
const VertexInputDescriptor* vertexInput,
std::array<VkVertexInputBindingDescription, kMaxVertexBuffers>* mBindings,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.cpp
new file mode 100644
index 00000000000..287323813bf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.cpp
@@ -0,0 +1,26 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/vulkan/ResourceMemoryVk.h"
+
+namespace dawn_native { namespace vulkan {
+
+ ResourceMemory::ResourceMemory(VkDeviceMemory memory) : mMemory(memory) {
+ }
+
+ VkDeviceMemory ResourceMemory::GetMemory() const {
+ return mMemory;
+ }
+
+}} // namespace dawn_native::vulkan
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.h
new file mode 100644
index 00000000000..eab8b3202be
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.h
@@ -0,0 +1,36 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_RESOURCEMEMORYVK_H_
+#define DAWNNATIVE_VULKAN_RESOURCEMEMORYVK_H_
+
+#include "common/vulkan_platform.h"
+#include "dawn_native/ResourceHeap.h"
+
+namespace dawn_native { namespace vulkan {
+
+ // Wrapper for physical memory used with or without a resource object.
+ class ResourceMemory : public ResourceHeapBase {
+ public:
+ ResourceMemory(VkDeviceMemory memory);
+ ~ResourceMemory() = default;
+
+ VkDeviceMemory GetMemory() const;
+
+ private:
+ VkDeviceMemory mMemory = VK_NULL_HANDLE;
+ };
+}} // namespace dawn_native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_RESOURCEMEMORYVK_H_
\ No newline at end of file
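
ResourceMemory exists so that a ResourceMemoryAllocation can name a heap plus an offset, which lets several resources sub-allocate from one VkDeviceMemory. A minimal sketch of how such an allocation is consumed at bind time (illustrative types, not Dawn's):

#include <vulkan/vulkan.h>

class Heap {
  public:
    explicit Heap(VkDeviceMemory memory) : mMemory(memory) {}
    VkDeviceMemory GetMemory() const { return mMemory; }

  private:
    VkDeviceMemory mMemory = VK_NULL_HANDLE;
};

struct Allocation {
    Heap* heap = nullptr;
    VkDeviceSize offset = 0;
};

// The buffer binds to the shared VkDeviceMemory at the allocation's offset.
VkResult BindBufferToAllocation(VkDevice device, VkBuffer buffer, const Allocation& allocation) {
    return vkBindBufferMemory(device, buffer, allocation.heap->GetMemory(), allocation.offset);
}

This is the same shape used by StagingBufferVk.cpp later in the patch, where BindBufferMemory is called with ToBackend(mAllocation.GetResourceHeap())->GetMemory() and mAllocation.GetOffset().
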
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
index 5de2411fe05..633fb5dde4b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
@@ -17,6 +17,7 @@
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/UtilsVulkan.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -57,8 +58,14 @@ namespace dawn_native { namespace vulkan {
}
} // anonymous namespace
- Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
- : SamplerBase(device, descriptor), mDevice(device) {
+ // static
+ ResultOrError<Sampler*> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+ std::unique_ptr<Sampler> sampler = std::make_unique<Sampler>(device, descriptor);
+ DAWN_TRY(sampler->Initialize(descriptor));
+ return sampler.release();
+ }
+
+ MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
VkSamplerCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
createInfo.pNext = nullptr;
@@ -78,15 +85,15 @@ namespace dawn_native { namespace vulkan {
createInfo.maxLod = descriptor->lodMaxClamp;
createInfo.unnormalizedCoordinates = VK_FALSE;
- if (device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &mHandle) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
+ Device* device = ToBackend(GetDevice());
+ return CheckVkSuccess(
+ device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ "CreateSampler");
}
Sampler::~Sampler() {
if (mHandle != VK_NULL_HANDLE) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
mHandle = VK_NULL_HANDLE;
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
index 2bd51f17924..9ea7e0fb924 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
@@ -18,20 +18,23 @@
#include "dawn_native/Sampler.h"
#include "common/vulkan_platform.h"
+#include "dawn_native/Error.h"
#include "dawn_native/vulkan/MemoryAllocator.h"
namespace dawn_native { namespace vulkan {
class Sampler : public SamplerBase {
public:
- Sampler(Device* device, const SamplerDescriptor* descriptor);
+ static ResultOrError<Sampler*> Create(Device* device, const SamplerDescriptor* descriptor);
~Sampler();
VkSampler GetHandle() const;
private:
+ using SamplerBase::SamplerBase;
+ MaybeError Initialize(const SamplerDescriptor* descriptor);
+
VkSampler mHandle = VK_NULL_HANDLE;
- Device* mDevice = nullptr;
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
index 9f48e170678..b741d7b901d 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
@@ -16,13 +16,21 @@
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
+#include "dawn_native/vulkan/VulkanError.h"
#include <spirv_cross.hpp>
namespace dawn_native { namespace vulkan {
- ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor) {
+ // static
+ ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor) {
+ std::unique_ptr<ShaderModule> module = std::make_unique<ShaderModule>(device, descriptor);
+ DAWN_TRY(module->Initialize(descriptor));
+ return module.release();
+ }
+
+ MaybeError ShaderModule::Initialize(const ShaderModuleDescriptor* descriptor) {
// Use SPIRV-Cross to extract info from the SPIRV even if Vulkan consumes SPIRV. We want to
// have a translation step eventually anyway.
spirv_cross::Compiler compiler(descriptor->code, descriptor->codeSize);
@@ -35,10 +43,10 @@ namespace dawn_native { namespace vulkan {
createInfo.codeSize = descriptor->codeSize * sizeof(uint32_t);
createInfo.pCode = descriptor->code;
- if (device->fn.CreateShaderModule(device->GetVkDevice(), &createInfo, nullptr, &mHandle) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
+ Device* device = ToBackend(GetDevice());
+ return CheckVkSuccess(
+ device->fn.CreateShaderModule(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ "CreateShaderModule");
}
ShaderModule::~ShaderModule() {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
index 8c904d20ee7..f328dac6b16 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
@@ -18,6 +18,7 @@
#include "dawn_native/ShaderModule.h"
#include "common/vulkan_platform.h"
+#include "dawn_native/Error.h"
namespace dawn_native { namespace vulkan {
@@ -25,12 +26,16 @@ namespace dawn_native { namespace vulkan {
class ShaderModule : public ShaderModuleBase {
public:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ static ResultOrError<ShaderModule*> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor);
~ShaderModule();
VkShaderModule GetHandle() const;
private:
+ using ShaderModuleBase::ShaderModuleBase;
+ MaybeError Initialize(const ShaderModuleDescriptor* descriptor);
+
VkShaderModule mHandle = VK_NULL_HANDLE;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
index 38bb84eeedb..8ae2ccd4757 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
@@ -15,7 +15,8 @@
#include "dawn_native/vulkan/StagingBufferVk.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/MemoryAllocator.h"
+#include "dawn_native/vulkan/ResourceMemoryVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -34,22 +35,20 @@ namespace dawn_native { namespace vulkan {
createInfo.queueFamilyIndexCount = 0;
createInfo.pQueueFamilyIndices = 0;
- if (mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &mBuffer) !=
- VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("Unable to create staging buffer.");
- }
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &mBuffer),
+ "vkCreateBuffer"));
VkMemoryRequirements requirements;
mDevice->fn.GetBufferMemoryRequirements(mDevice->GetVkDevice(), mBuffer, &requirements);
- if (!mDevice->GetMemoryAllocator()->Allocate(requirements, true, &mAllocation)) {
- return DAWN_DEVICE_LOST_ERROR("Unable to allocate memory for staging buffer.");
- }
+ DAWN_TRY_ASSIGN(mAllocation, mDevice->AllocateMemory(requirements, true));
- if (mDevice->fn.BindBufferMemory(mDevice->GetVkDevice(), mBuffer, mAllocation.GetMemory(),
- mAllocation.GetMemoryOffset()) != VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("Unable to attach memory to the staging buffer.");
- }
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.BindBufferMemory(mDevice->GetVkDevice(), mBuffer,
+ ToBackend(mAllocation.GetResourceHeap())->GetMemory(),
+ mAllocation.GetOffset()),
+ "vkBindBufferMemory"));
mMappedPointer = mAllocation.GetMappedPointer();
if (mMappedPointer == nullptr) {
@@ -62,7 +61,7 @@ namespace dawn_native { namespace vulkan {
StagingBuffer::~StagingBuffer() {
mMappedPointer = nullptr;
mDevice->GetFencedDeleter()->DeleteWhenUnused(mBuffer);
- mDevice->GetMemoryAllocator()->Free(&mAllocation);
+ mDevice->DeallocateMemory(mAllocation);
}
VkBuffer StagingBuffer::GetBufferHandle() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.h
index 618c5ed7ffc..1106a2723d0 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.h
@@ -15,8 +15,9 @@
#ifndef DAWNNATIVE_STAGINGBUFFERVK_H_
#define DAWNNATIVE_STAGINGBUFFERVK_H_
+#include "common/vulkan_platform.h"
+#include "dawn_native/ResourceMemoryAllocation.h"
#include "dawn_native/StagingBuffer.h"
-#include "dawn_native/vulkan/MemoryAllocator.h"
namespace dawn_native { namespace vulkan {
@@ -34,7 +35,7 @@ namespace dawn_native { namespace vulkan {
private:
Device* mDevice;
VkBuffer mBuffer;
- DeviceMemoryAllocation mAllocation;
+ ResourceMemoryAllocation mAllocation;
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
index 570760d15a4..b465bd0a388 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
@@ -19,6 +19,11 @@
namespace dawn_native { namespace vulkan {
+ // static
+ SwapChain* SwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+ return new SwapChain(device, descriptor);
+ }
+
SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
: SwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
@@ -46,7 +51,7 @@ namespace dawn_native { namespace vulkan {
return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture);
}
- void SwapChain::OnBeforePresent(TextureBase* texture) {
+ MaybeError SwapChain::OnBeforePresent(TextureBase* texture) {
Device* device = ToBackend(GetDevice());
// Perform the necessary pipeline barriers for the texture to be used with the usage
@@ -54,7 +59,9 @@ namespace dawn_native { namespace vulkan {
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
ToBackend(texture)->TransitionUsageNow(recordingContext, mTextureUsage);
- device->SubmitPendingCommands();
+ DAWN_TRY(device->SubmitPendingCommands());
+
+ return {};
}
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
index 190346ceffa..339d9da0096 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
@@ -25,12 +25,14 @@ namespace dawn_native { namespace vulkan {
class SwapChain : public SwapChainBase {
public:
- SwapChain(Device* device, const SwapChainDescriptor* descriptor);
+ static SwapChain* Create(Device* device, const SwapChainDescriptor* descriptor);
~SwapChain();
protected:
+ SwapChain(Device* device, const SwapChainDescriptor* descriptor);
+
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- void OnBeforePresent(TextureBase* texture) override;
+ MaybeError OnBeforePresent(TextureBase* texture) override;
private:
dawn::TextureUsage mTextureUsage;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
index a37e300a03c..0cd4d053ca4 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
@@ -26,6 +26,7 @@
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/StagingBufferVk.h"
#include "dawn_native/vulkan/UtilsVulkan.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -395,8 +396,31 @@ namespace dawn_native { namespace vulkan {
return {};
}
- Texture::Texture(Device* device, const TextureDescriptor* descriptor)
- : TextureBase(device, descriptor, TextureState::OwnedInternal) {
+ // static
+ ResultOrError<Texture*> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
+ std::unique_ptr<Texture> texture =
+ std::make_unique<Texture>(device, descriptor, TextureState::OwnedInternal);
+ DAWN_TRY(texture->InitializeAsInternalTexture());
+ return texture.release();
+ }
+
+ // static
+ ResultOrError<Texture*> Texture::CreateFromExternal(Device* device,
+ const ExternalImageDescriptor* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ VkSemaphore signalSemaphore,
+ VkDeviceMemory externalMemoryAllocation,
+ std::vector<VkSemaphore> waitSemaphores) {
+ std::unique_ptr<Texture> texture =
+ std::make_unique<Texture>(device, textureDescriptor, TextureState::OwnedInternal);
+ DAWN_TRY(texture->InitializeFromExternal(
+            descriptor, signalSemaphore, externalMemoryAllocation, std::move(waitSemaphores)));
+ return texture.release();
+ }
+
+ MaybeError Texture::InitializeAsInternalTexture() {
+ Device* device = ToBackend(GetDevice());
+
// Create the Vulkan image "container". We don't need to check that the format supports the
// combination of sample, usage etc. because validation should have been done in the Dawn
// frontend already based on the minimum supported formats in the Vulkan spec
@@ -428,29 +452,30 @@ namespace dawn_native { namespace vulkan {
// also required for the implementation of robust resource initialization.
createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- if (device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &mHandle) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ "CreateImage"));
// Create the image memory and associate it with the container
VkMemoryRequirements requirements;
device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
if (!device->GetMemoryAllocator()->Allocate(requirements, false, &mMemoryAllocation)) {
- ASSERT(false);
+ return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate texture");
}
- if (device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
- mMemoryAllocation.GetMemory(),
- mMemoryAllocation.GetMemoryOffset()) != VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(CheckVkSuccess(device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
+ mMemoryAllocation.GetMemory(),
+ mMemoryAllocation.GetMemoryOffset()),
+ "BindImageMemory"));
+
if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- device->ConsumedError(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
- 0, GetNumMipLevels(), 0, GetArrayLayers(),
- TextureBase::ClearValue::NonZero));
+ DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(), 0,
+ GetNumMipLevels(), 0, GetArrayLayers(),
+ TextureBase::ClearValue::NonZero));
}
+
+ return {};
}
// With this constructor, the lifetime of the resource is externally managed.
@@ -458,18 +483,14 @@ namespace dawn_native { namespace vulkan {
: TextureBase(device, descriptor, TextureState::OwnedExternal), mHandle(nativeImage) {
}
- // Internally managed, but imported from file descriptor
- Texture::Texture(Device* device,
- const ExternalImageDescriptor* descriptor,
- const TextureDescriptor* textureDescriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores)
- : TextureBase(device, textureDescriptor, TextureState::OwnedInternal),
- mExternalAllocation(externalMemoryAllocation),
- mExternalState(ExternalState::PendingAcquire),
- mSignalSemaphore(signalSemaphore),
- mWaitRequirements(std::move(waitSemaphores)) {
+    // Internally managed, but imported from an external handle
+ MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptor* descriptor,
+ VkSemaphore signalSemaphore,
+ VkDeviceMemory externalMemoryAllocation,
+ std::vector<VkSemaphore> waitSemaphores) {
+ mExternalState = ExternalState::PendingAcquire;
+ Device* device = ToBackend(GetDevice());
+
VkImageCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
createInfo.pNext = nullptr;
@@ -494,10 +515,9 @@ namespace dawn_native { namespace vulkan {
// also required for the implementation of robust resource initialization.
createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- if (device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &mHandle) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ "CreateImage"));
// Create the image memory and associate it with the container
VkMemoryRequirements requirements;
@@ -505,15 +525,21 @@ namespace dawn_native { namespace vulkan {
ASSERT(requirements.size <= descriptor->allocationSize);
- if (device->fn.BindImageMemory(device->GetVkDevice(), mHandle, mExternalAllocation, 0) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
+ "BindImageMemory (external)"));
// Don't clear imported texture if already cleared
if (descriptor->isCleared) {
- SetIsSubresourceContentInitialized(0, 1, 0, 1);
+ SetIsSubresourceContentInitialized(true, 0, 1, 0, 1);
}
+
+ // Success, acquire all the external objects.
+ mExternalAllocation = externalMemoryAllocation;
+ mSignalSemaphore = signalSemaphore;
+ mWaitRequirements = std::move(waitSemaphores);
+
+ return {};
}
MaybeError Texture::SignalAndDestroy(VkSemaphore* outSignalSemaphore) {
@@ -536,7 +562,7 @@ namespace dawn_native { namespace vulkan {
// Queue submit to signal we are done with the texture
device->GetPendingRecordingContext()->signalSemaphores.push_back(mSignalSemaphore);
- device->SubmitPendingCommands();
+ DAWN_TRY(device->SubmitPendingCommands());
// Write out the signal semaphore
*outSignalSemaphore = mSignalSemaphore;
@@ -670,8 +696,9 @@ namespace dawn_native { namespace vulkan {
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
clearDepthStencilValue, 1, &range);
} else {
+ float fClearColor = static_cast<float>(clearColor);
VkClearColorValue clearColorValue = {
- {clearColor, clearColor, clearColor, clearColor}};
+ {fClearColor, fClearColor, fClearColor, fClearColor}};
device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
&clearColorValue, 1, &range);
@@ -687,10 +714,10 @@ namespace dawn_native { namespace vulkan {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
}
uint32_t bufferSize = static_cast<uint32_t>(bufferSize64);
- DynamicUploader* uploader = nullptr;
- DAWN_TRY_ASSIGN(uploader, device->GetDynamicUploader());
+ DynamicUploader* uploader = device->GetDynamicUploader();
UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, uploader->Allocate(bufferSize));
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(bufferSize, device->GetPendingCommandSerial()));
std::fill(reinterpret_cast<uint32_t*>(uploadHandle.mappedBuffer),
reinterpret_cast<uint32_t*>(uploadHandle.mappedBuffer + bufferSize),
clearColor);
@@ -701,25 +728,30 @@ namespace dawn_native { namespace vulkan {
bufferCopy.offset = uploadHandle.startOffset;
bufferCopy.rowPitch = rowPitch;
- dawn_native::TextureCopy textureCopy;
- textureCopy.texture = this;
- textureCopy.origin = {0, 0, 0};
- textureCopy.mipLevel = baseMipLevel;
- textureCopy.arrayLayer = baseArrayLayer;
-
Extent3D copySize = {GetSize().width, GetSize().height, 1};
- VkBufferImageCopy region =
- ComputeBufferImageCopyRegion(bufferCopy, textureCopy, copySize);
-
- // copy the clear buffer to the texture image
- device->fn.CmdCopyBufferToImage(
- recordingContext->commandBuffer,
- ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(), GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+ for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
+ ++layer) {
+ dawn_native::TextureCopy textureCopy;
+ textureCopy.texture = this;
+ textureCopy.origin = {0, 0, 0};
+ textureCopy.mipLevel = level;
+ textureCopy.arrayLayer = layer;
+
+ VkBufferImageCopy region =
+ ComputeBufferImageCopyRegion(bufferCopy, textureCopy, copySize);
+
+ // copy the clear buffer to the texture image
+ device->fn.CmdCopyBufferToImage(
+ recordingContext->commandBuffer,
+ ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(), GetHandle(),
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+ }
+ }
}
if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
+ SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
layerCount);
device->IncrementLazyClearCountForTesting();
}
@@ -750,10 +782,16 @@ namespace dawn_native { namespace vulkan {
}
}
- // TODO(jiawei.shao@intel.com): create texture view by TextureViewDescriptor
- TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
- : TextureViewBase(texture, descriptor) {
- Device* device = ToBackend(texture->GetDevice());
+ // static
+ ResultOrError<TextureView*> TextureView::Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ std::unique_ptr<TextureView> view = std::make_unique<TextureView>(texture, descriptor);
+ DAWN_TRY(view->Initialize(descriptor));
+ return view.release();
+ }
+
+ MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+ Device* device = ToBackend(GetTexture()->GetDevice());
VkImageViewCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
@@ -770,10 +808,9 @@ namespace dawn_native { namespace vulkan {
createInfo.subresourceRange.baseArrayLayer = descriptor->baseArrayLayer;
createInfo.subresourceRange.layerCount = descriptor->arrayLayerCount;
- if (device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &mHandle) !=
- VK_SUCCESS) {
- ASSERT(false);
- }
+ return CheckVkSuccess(
+ device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ "CreateImageView");
}
TextureView::~TextureView() {
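
The lazy-clear path above now records one buffer-to-image copy per (mip level, array layer) pair instead of a single copy of the base subresource. The loop structure in isolation, with the actual copy call abstracted behind a callback (an assumption made for the sketch):

#include <cstdint>
#include <functional>

// recordCopy stands in for building the VkBufferImageCopy region and calling
// CmdCopyBufferToImage for one subresource.
void ClearSubresources(uint32_t baseMipLevel, uint32_t levelCount,
                       uint32_t baseArrayLayer, uint32_t layerCount,
                       const std::function<void(uint32_t level, uint32_t layer)>& recordCopy) {
    for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
        for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount; ++layer) {
            recordCopy(level, layer);
        }
    }
}
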
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
index 5d049e8cf1d..dd1d5f5393a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
@@ -35,22 +35,21 @@ namespace dawn_native { namespace vulkan {
class Texture : public TextureBase {
public:
- enum class ExternalState {
- InternalOnly,
- PendingAcquire,
- Acquired,
- PendingRelease,
- Released
- };
+ // Used to create a regular texture from a descriptor.
+ static ResultOrError<Texture*> Create(Device* device, const TextureDescriptor* descriptor);
+
+ // Used to create a texture from Vulkan external memory objects.
+ // Ownership of semaphores and the memory allocation is taken only if the creation is
+ // a success.
+ static ResultOrError<Texture*> CreateFromExternal(
+ Device* device,
+ const ExternalImageDescriptor* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ VkSemaphore signalSemaphore,
+ VkDeviceMemory externalMemoryAllocation,
+ std::vector<VkSemaphore> waitSemaphores);
- Texture(Device* device, const TextureDescriptor* descriptor);
Texture(Device* device, const TextureDescriptor* descriptor, VkImage nativeImage);
- Texture(Device* device,
- const ExternalImageDescriptor* descriptor,
- const TextureDescriptor* textureDescriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores);
~Texture();
VkImage GetHandle() const;
@@ -70,6 +69,13 @@ namespace dawn_native { namespace vulkan {
MaybeError SignalAndDestroy(VkSemaphore* outSignalSemaphore);
private:
+ using TextureBase::TextureBase;
+ MaybeError InitializeAsInternalTexture();
+ MaybeError InitializeFromExternal(const ExternalImageDescriptor* descriptor,
+ VkSemaphore signalSemaphore,
+ VkDeviceMemory externalMemoryAllocation,
+ std::vector<VkSemaphore> waitSemaphores);
+
void DestroyImpl() override;
MaybeError ClearTexture(CommandRecordingContext* recordingContext,
uint32_t baseMipLevel,
@@ -82,24 +88,36 @@ namespace dawn_native { namespace vulkan {
DeviceMemoryAllocation mMemoryAllocation;
VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
+ enum class ExternalState {
+ InternalOnly,
+ PendingAcquire,
+ Acquired,
+ PendingRelease,
+ Released
+ };
ExternalState mExternalState = ExternalState::InternalOnly;
ExternalState mLastExternalState = ExternalState::InternalOnly;
+
VkSemaphore mSignalSemaphore = VK_NULL_HANDLE;
std::vector<VkSemaphore> mWaitRequirements;
// A usage of none will make sure the texture is transitioned before its first use as
- // required by the spec.
+ // required by the Vulkan spec.
dawn::TextureUsage mLastUsage = dawn::TextureUsage::None;
};
class TextureView : public TextureViewBase {
public:
- TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+ static ResultOrError<TextureView*> Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
~TextureView();
VkImageView GetHandle() const;
private:
+ using TextureViewBase::TextureViewBase;
+ MaybeError Initialize(const TextureViewDescriptor* descriptor);
+
VkImageView mHandle = VK_NULL_HANDLE;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
index 0d36776b73a..d3bbe67d8e5 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
@@ -103,6 +103,12 @@ namespace dawn_native { namespace vulkan {
GET_INSTANCE_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
}
+#ifdef VK_USE_PLATFORM_FUCHSIA
+ if (globalInfo.fuchsiaImagePipeSurface) {
+ GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
+ }
+#endif
+
return {};
}
@@ -250,6 +256,18 @@ namespace dawn_native { namespace vulkan {
GET_DEVICE_PROC(GetSemaphoreFdKHR);
}
+#ifdef VK_USE_PLATFORM_FUCHSIA
+ if (usedKnobs.externalMemoryZirconHandle) {
+ GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
+ GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
+ }
+
+ if (usedKnobs.externalSemaphoreZirconHandle) {
+ GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
+ GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
+ }
+#endif
+
if (usedKnobs.swapchain) {
GET_DEVICE_PROC(CreateSwapchainKHR);
GET_DEVICE_PROC(DestroySwapchainKHR);
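
The new Fuchsia blocks follow the file's existing rule: only fetch procs for layers and extensions that were actually enabled. The GET_INSTANCE_PROC/GET_DEVICE_PROC macro bodies are not shown in this patch; at the raw Vulkan level the pattern amounts to roughly this:

#include <vulkan/vulkan.h>

PFN_vkCreateSwapchainKHR LoadCreateSwapchainProc(VkDevice device,
                                                 PFN_vkGetDeviceProcAddr getDeviceProcAddr,
                                                 bool swapchainEnabled) {
    if (!swapchainEnabled) {
        // Never query procs for extensions that were not enabled on this device.
        return nullptr;
    }
    return reinterpret_cast<PFN_vkCreateSwapchainKHR>(
        getDeviceProcAddr(device, "vkCreateSwapchainKHR"));
}
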
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
index b1f24f191f9..28e4096cbe3 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
@@ -106,6 +106,11 @@ namespace dawn_native { namespace vulkan {
PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR
GetPhysicalDeviceSparseImageFormatProperties2KHR = nullptr;
+#ifdef VK_USE_PLATFORM_FUCHSIA
+ // FUCHSIA_image_pipe_surface
+ PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA = nullptr;
+#endif
+
// ---------- Device procs
// Core Vulkan 1.0
@@ -248,6 +253,17 @@ namespace dawn_native { namespace vulkan {
// VK_KHR_external_semaphore_fd
PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR = nullptr;
PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR = nullptr;
+
+#ifdef VK_USE_PLATFORM_FUCHSIA
+ // VK_FUCHSIA_external_memory
+ PFN_vkGetMemoryZirconHandleFUCHSIA GetMemoryZirconHandleFUCHSIA = nullptr;
+ PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA GetMemoryZirconHandlePropertiesFUCHSIA =
+ nullptr;
+
+ // VK_FUCHSIA_external_semaphore
+ PFN_vkImportSemaphoreZirconHandleFUCHSIA ImportSemaphoreZirconHandleFUCHSIA = nullptr;
+ PFN_vkGetSemaphoreZirconHandleFUCHSIA GetSemaphoreZirconHandleFUCHSIA = nullptr;
+#endif
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
index f4f0284154f..18cefb272c9 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
@@ -27,6 +27,22 @@ namespace {
bool IsExtensionName(const VkExtensionProperties& extension, const char* name) {
return strncmp(extension.extensionName, name, VK_MAX_EXTENSION_NAME_SIZE) == 0;
}
+
+ bool EnumerateInstanceExtensions(const char* layerName,
+ const dawn_native::vulkan::VulkanFunctions& vkFunctions,
+ std::vector<VkExtensionProperties>* extensions) {
+ uint32_t count = 0;
+ VkResult result =
+ vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr);
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return false;
+ }
+ extensions->resize(count);
+ result =
+ vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, extensions->data());
+ return (result == VK_SUCCESS);
+ }
+
} // namespace
namespace dawn_native { namespace vulkan {
@@ -34,6 +50,7 @@ namespace dawn_native { namespace vulkan {
const char kLayerNameLunargStandardValidation[] = "VK_LAYER_LUNARG_standard_validation";
const char kLayerNameLunargVKTrace[] = "VK_LAYER_LUNARG_vktrace";
const char kLayerNameRenderDocCapture[] = "VK_LAYER_RENDERDOC_Capture";
+ const char kLayerNameFuchsiaImagePipeSwapchain[] = "VK_LAYER_FUCHSIA_imagepipe_swapchain";
const char kExtensionNameExtDebugMarker[] = "VK_EXT_debug_marker";
const char kExtensionNameExtDebugReport[] = "VK_EXT_debug_report";
@@ -42,10 +59,12 @@ namespace dawn_native { namespace vulkan {
const char kExtensionNameKhrExternalMemoryCapabilities[] =
"VK_KHR_external_memory_capabilities";
const char kExtensionNameKhrExternalMemoryFD[] = "VK_KHR_external_memory_fd";
+ const char kExtensionNameFuchsiaExternalMemory[] = "VK_FUCHSIA_external_memory";
const char kExtensionNameKhrExternalSemaphore[] = "VK_KHR_external_semaphore";
const char kExtensionNameKhrExternalSemaphoreCapabilities[] =
"VK_KHR_external_semaphore_capabilities";
const char kExtensionNameKhrExternalSemaphoreFD[] = "VK_KHR_external_semaphore_fd";
+ const char kExtensionNameFuchsiaExternalSemaphore[] = "VK_FUCHSIA_external_semaphore";
const char kExtensionNameKhrGetPhysicalDeviceProperties2[] =
"VK_KHR_get_physical_device_properties2";
const char kExtensionNameKhrSurface[] = "VK_KHR_surface";
@@ -54,6 +73,8 @@ namespace dawn_native { namespace vulkan {
const char kExtensionNameKhrWin32Surface[] = "VK_KHR_win32_surface";
const char kExtensionNameKhrXcbSurface[] = "VK_KHR_xcb_surface";
const char kExtensionNameKhrXlibSurface[] = "VK_KHR_xlib_surface";
+ const char kExtensionNameFuchsiaImagePipeSurface[] = "VK_FUCHSIA_imagepipe_surface";
+ const char kExtensionNameKhrMaintenance1[] = "VK_KHR_maintenance1";
ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const Backend& backend) {
VulkanGlobalInfo info = {};
@@ -86,22 +107,18 @@ namespace dawn_native { namespace vulkan {
if (IsLayerName(layer, kLayerNameRenderDocCapture)) {
info.renderDocCapture = true;
}
+                // Technical note: Fuchsia implements the swapchain through
+                // a layer (VK_LAYER_FUCHSIA_imagepipe_swapchain), which adds
+                // an instance extension (VK_FUCHSIA_imagepipe_surface) to all ICDs.
+ if (IsLayerName(layer, kLayerNameFuchsiaImagePipeSwapchain)) {
+ info.fuchsiaImagePipeSwapchain = true;
+ }
}
}
// Gather the info about the instance extensions
{
- uint32_t count = 0;
- VkResult result =
- vkFunctions.EnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumerateInstanceExtensionProperties");
- }
-
- info.extensions.resize(count);
- result = vkFunctions.EnumerateInstanceExtensionProperties(nullptr, &count,
- info.extensions.data());
- if (result != VK_SUCCESS) {
+ if (!EnumerateInstanceExtensions(nullptr, vkFunctions, &info.extensions)) {
return DAWN_DEVICE_LOST_ERROR("vkEnumerateInstanceExtensionProperties");
}
@@ -136,6 +153,27 @@ namespace dawn_native { namespace vulkan {
if (IsExtensionName(extension, kExtensionNameKhrXlibSurface)) {
info.xlibSurface = true;
}
+ if (IsExtensionName(extension, kExtensionNameFuchsiaImagePipeSurface)) {
+ info.fuchsiaImagePipeSurface = true;
+ }
+ }
+ }
+
+ // Specific handling for the Fuchsia swapchain surface creation extension
+ // which is normally part of the Fuchsia-specific swapchain layer.
+ if (info.fuchsiaImagePipeSwapchain && !info.fuchsiaImagePipeSurface) {
+ std::vector<VkExtensionProperties> layer_extensions;
+ if (!EnumerateInstanceExtensions(kLayerNameFuchsiaImagePipeSwapchain, vkFunctions,
+ &layer_extensions)) {
+ return DAWN_DEVICE_LOST_ERROR("vkEnumerateInstanceExtensionProperties");
+ }
+
+ for (const auto& extension : layer_extensions) {
+ if (IsExtensionName(extension, kExtensionNameFuchsiaImagePipeSurface)) {
+ info.fuchsiaImagePipeSurface = true;
+ // For now, copy this to the global extension list.
+ info.extensions.push_back(extension);
+ }
}
}
@@ -249,15 +287,24 @@ namespace dawn_native { namespace vulkan {
if (IsExtensionName(extension, kExtensionNameKhrExternalMemoryFD)) {
info.externalMemoryFD = true;
}
+ if (IsExtensionName(extension, kExtensionNameFuchsiaExternalMemory)) {
+ info.externalMemoryZirconHandle = true;
+ }
if (IsExtensionName(extension, kExtensionNameKhrExternalSemaphore)) {
info.externalSemaphore = true;
}
if (IsExtensionName(extension, kExtensionNameKhrExternalSemaphoreFD)) {
info.externalSemaphoreFD = true;
}
+ if (IsExtensionName(extension, kExtensionNameFuchsiaExternalSemaphore)) {
+ info.externalSemaphoreZirconHandle = true;
+ }
if (IsExtensionName(extension, kExtensionNameKhrSwapchain)) {
info.swapchain = true;
}
+ if (IsExtensionName(extension, kExtensionNameKhrMaintenance1)) {
+ info.maintenance1 = true;
+ }
}
}
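
EnumerateInstanceExtensions factors out the usual Vulkan count-then-fill enumeration; VK_INCOMPLETE is tolerated on the first call because only the count is requested, while the second call must succeed. Written directly against the loader's exported entry point, the helper reduces to:

#include <vulkan/vulkan.h>
#include <vector>

bool EnumerateExtensions(const char* layerName,
                         std::vector<VkExtensionProperties>* extensions) {
    uint32_t count = 0;
    VkResult result = vkEnumerateInstanceExtensionProperties(layerName, &count, nullptr);
    if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
        return false;
    }
    extensions->resize(count);
    result = vkEnumerateInstanceExtensionProperties(layerName, &count, extensions->data());
    return result == VK_SUCCESS;
}

Passing a layer name (here, the Fuchsia image-pipe swapchain layer) restricts the query to extensions exposed by that layer, which is how the surface extension is discovered below when the ICDs do not advertise it globally.
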
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
index 48bcd9a3dec..2da3466ba21 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
@@ -28,6 +28,7 @@ namespace dawn_native { namespace vulkan {
extern const char kLayerNameLunargStandardValidation[];
extern const char kLayerNameLunargVKTrace[];
extern const char kLayerNameRenderDocCapture[];
+ extern const char kLayerNameFuchsiaImagePipeSwapchain[];
extern const char kExtensionNameExtDebugMarker[];
extern const char kExtensionNameExtDebugReport[];
@@ -35,9 +36,11 @@ namespace dawn_native { namespace vulkan {
extern const char kExtensionNameKhrExternalMemory[];
extern const char kExtensionNameKhrExternalMemoryCapabilities[];
extern const char kExtensionNameKhrExternalMemoryFD[];
+ extern const char kExtensionNameFuchsiaExternalMemory[];
extern const char kExtensionNameKhrExternalSemaphore[];
extern const char kExtensionNameKhrExternalSemaphoreCapabilities[];
extern const char kExtensionNameKhrExternalSemaphoreFD[];
+ extern const char kExtensionNameFuchsiaExternalSemaphore[];
extern const char kExtensionNameKhrGetPhysicalDeviceProperties2[];
extern const char kExtensionNameKhrSurface[];
extern const char kExtensionNameKhrSwapchain[];
@@ -45,6 +48,8 @@ namespace dawn_native { namespace vulkan {
extern const char kExtensionNameKhrWin32Surface[];
extern const char kExtensionNameKhrXcbSurface[];
extern const char kExtensionNameKhrXlibSurface[];
+ extern const char kExtensionNameFuchsiaImagePipeSurface[];
+ extern const char kExtensionNameKhrMaintenance1[];
// Global information - gathered before the instance is created
struct VulkanGlobalKnobs {
@@ -52,6 +57,7 @@ namespace dawn_native { namespace vulkan {
bool standardValidation = false;
bool vktrace = false;
bool renderDocCapture = false;
+ bool fuchsiaImagePipeSwapchain = false;
// Extensions
bool debugReport = false;
@@ -64,6 +70,7 @@ namespace dawn_native { namespace vulkan {
bool win32Surface = false;
bool xcbSurface = false;
bool xlibSurface = false;
+ bool fuchsiaImagePipeSurface = false;
};
struct VulkanGlobalInfo : VulkanGlobalKnobs {
@@ -81,9 +88,12 @@ namespace dawn_native { namespace vulkan {
bool debugMarker = false;
bool externalMemory = false;
bool externalMemoryFD = false;
+ bool externalMemoryZirconHandle = false;
bool externalSemaphore = false;
bool externalSemaphoreFD = false;
+ bool externalSemaphoreZirconHandle = false;
bool swapchain = false;
+ bool maintenance1 = false;
};
struct VulkanDeviceInfo : VulkanDeviceKnobs {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
new file mode 100644
index 00000000000..1788f70cad0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
@@ -0,0 +1,110 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/vulkan/AdapterVk.h"
+#include "dawn_native/vulkan/BackendVk.h"
+#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
+#include "dawn_native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn_native { namespace vulkan { namespace external_memory {
+
+ Service::Service(Device* device) : mDevice(device) {
+ const VulkanDeviceInfo& deviceInfo = mDevice->GetDeviceInfo();
+ const VulkanGlobalInfo& globalInfo =
+ ToBackend(mDevice->GetAdapter())->GetBackend()->GetGlobalInfo();
+
+ mSupported = globalInfo.getPhysicalDeviceProperties2 &&
+ globalInfo.externalMemoryCapabilities && deviceInfo.externalMemory &&
+                     deviceInfo.externalMemoryZirconHandle;
+ }
+
+ Service::~Service() = default;
+
+ bool Service::Supported(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags) {
+ // Early out before we try using extension functions
+ if (!mSupported) {
+ return false;
+ }
+
+ VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
+ externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
+ externalFormatInfo.pNext = nullptr;
+ externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA;
+
+ VkPhysicalDeviceImageFormatInfo2 formatInfo;
+ formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
+ formatInfo.pNext = &externalFormatInfo;
+ formatInfo.format = format;
+ formatInfo.type = type;
+ formatInfo.tiling = tiling;
+ formatInfo.usage = usage;
+ formatInfo.flags = flags;
+
+ VkExternalImageFormatProperties externalFormatProperties;
+ externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
+ externalFormatProperties.pNext = nullptr;
+
+ VkImageFormatProperties2 formatProperties;
+ formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
+ formatProperties.pNext = &externalFormatProperties;
+
+ VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2KHR(
+ ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);
+
+ // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
+ if (result != VK_SUCCESS) {
+ return false;
+ }
+
+ // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
+ VkFlags memoryFlags =
+ externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
+ return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) &&
+ !(memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR);
+ }
+
+ ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+ VkDeviceSize allocationSize,
+ uint32_t memoryTypeIndex) {
+ if (handle == ZX_HANDLE_INVALID) {
+ return DAWN_VALIDATION_ERROR("Trying to import memory with invalid handle");
+ }
+
+ VkImportMemoryZirconHandleInfoFUCHSIA importMemoryHandleInfo;
+ importMemoryHandleInfo.sType =
+            VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA;
+ importMemoryHandleInfo.pNext = nullptr;
+ importMemoryHandleInfo.handleType =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA;
+ importMemoryHandleInfo.handle = handle;
+
+ VkMemoryAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ allocateInfo.pNext = &importMemoryHandleInfo;
+ allocateInfo.allocationSize = allocationSize;
+ allocateInfo.memoryTypeIndex = memoryTypeIndex;
+
+ VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+ nullptr, &allocatedMemory),
+ "vkAllocateMemory"));
+ return allocatedMemory;
+ }
+
+}}} // namespace dawn_native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
new file mode 100644
index 00000000000..81fb9fd5ecd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
@@ -0,0 +1,138 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/vulkan/AdapterVk.h"
+#include "dawn_native/vulkan/BackendVk.h"
+#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
+#include "dawn_native/vulkan/external_semaphore/SemaphoreService.h"
+
+namespace dawn_native { namespace vulkan { namespace external_semaphore {
+
+ Service::Service(Device* device) : mDevice(device) {
+ const VulkanDeviceInfo& deviceInfo = mDevice->GetDeviceInfo();
+ const VulkanGlobalInfo& globalInfo =
+ ToBackend(mDevice->GetAdapter())->GetBackend()->GetGlobalInfo();
+
+ mSupported = globalInfo.getPhysicalDeviceProperties2 &&
+ globalInfo.externalSemaphoreCapabilities && deviceInfo.externalSemaphore &&
+                     deviceInfo.externalSemaphoreZirconHandle;
+
+ // Early out before we try using extension functions
+ if (!mSupported) {
+ return;
+ }
+
+ VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
+ semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
+ semaphoreInfo.pNext = nullptr;
+ semaphoreInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
+
+ VkExternalSemaphorePropertiesKHR semaphoreProperties;
+ semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
+ semaphoreProperties.pNext = nullptr;
+
+ mDevice->fn.GetPhysicalDeviceExternalSemaphorePropertiesKHR(
+ ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &semaphoreInfo,
+ &semaphoreProperties);
+
+ VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+ mSupported =
+ mSupported &&
+ ((semaphoreProperties.externalSemaphoreFeatures & requiredFlags) == requiredFlags);
+ }
+
+ Service::~Service() = default;
+
+ bool Service::Supported() {
+ return mSupported;
+ }
+
+ ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+ if (handle == ZX_HANDLE_INVALID) {
+ return DAWN_VALIDATION_ERROR("Trying to import semaphore with invalid handle");
+ }
+
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ VkSemaphoreCreateInfo info;
+ info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ info.pNext = nullptr;
+ info.flags = 0;
+
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &semaphore),
+ "vkCreateSemaphore"));
+
+        VkImportSemaphoreZirconHandleInfoFUCHSIA importSemaphoreHandleInfo;
+        importSemaphoreHandleInfo.sType =
+            VK_STRUCTURE_TYPE_TEMP_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA;
+        importSemaphoreHandleInfo.pNext = nullptr;
+        importSemaphoreHandleInfo.semaphore = semaphore;
+        importSemaphoreHandleInfo.flags = 0;
+        importSemaphoreHandleInfo.handleType =
+            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
+        importSemaphoreHandleInfo.handle = handle;
+
+        MaybeError status = CheckVkSuccess(mDevice->fn.ImportSemaphoreZirconHandleFUCHSIA(
+                                               mDevice->GetVkDevice(), &importSemaphoreHandleInfo),
+ "vkImportSemaphoreZirconHandleFUCHSIA");
+
+ if (status.IsError()) {
+ mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
+ DAWN_TRY(std::move(status));
+ }
+
+ return semaphore;
+ }
+
+ ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+ VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
+ exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
+ exportSemaphoreInfo.pNext = nullptr;
+ exportSemaphoreInfo.handleTypes =
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
+
+ VkSemaphoreCreateInfo semaphoreCreateInfo;
+ semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
+ semaphoreCreateInfo.flags = 0;
+
+ VkSemaphore signalSemaphore;
+ DAWN_TRY(
+ CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
+ nullptr, &signalSemaphore),
+ "vkCreateSemaphore"));
+ return signalSemaphore;
+ }
+
+ ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+ VkSemaphoreGetZirconHandleInfoFUCHSIA semaphoreGetHandleInfo;
+ semaphoreGetHandleInfo.sType =
+ VK_STRUCTURE_TYPE_TEMP_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA;
+ semaphoreGetHandleInfo.pNext = nullptr;
+ semaphoreGetHandleInfo.semaphore = semaphore;
+ semaphoreGetHandleInfo.handleType =
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
+
+ zx_handle_t handle = ZX_HANDLE_INVALID;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.GetSemaphoreZirconHandleFUCHSIA(
+ mDevice->GetVkDevice(), &semaphoreGetHandleInfo, &handle),
+                                "vkGetSemaphoreZirconHandleFUCHSIA"));
+
+ ASSERT(handle != ZX_HANDLE_INVALID);
+ return handle;
+ }
+
+}}} // namespace dawn_native::vulkan::external_semaphore
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp b/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
index f7b949122f2..0cbbcd6c1ea 100644
--- a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
@@ -33,7 +33,7 @@ namespace dawn_wire {
return client::GetProcs();
}
- const char* WireClient::HandleCommands(const char* commands, size_t size) {
+ const volatile char* WireClient::HandleCommands(const volatile char* commands, size_t size) {
return mImpl->HandleCommands(commands, size);
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp b/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
index 45ef9ca4b5b..18966471268 100644
--- a/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
@@ -28,7 +28,7 @@ namespace dawn_wire {
mImpl.reset();
}
- const char* WireServer::HandleCommands(const char* commands, size_t size) {
+ const volatile char* WireServer::HandleCommands(const volatile char* commands, size_t size) {
return mImpl->HandleCommands(commands, size);
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
index df6ed53535e..12ecf896468 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
@@ -285,8 +285,9 @@ namespace dawn_wire { namespace client {
void* userdata) {
Fence* fence = reinterpret_cast<Fence*>(cFence);
if (value > fence->signaledValue) {
- fence->device->HandleError(DAWN_ERROR_TYPE_VALIDATION,
- "Value greater than fence signaled value");
+ ClientDeviceInjectError(reinterpret_cast<DawnDevice>(fence->device),
+ DAWN_ERROR_TYPE_VALIDATION,
+ "Value greater than fence signaled value");
callback(DAWN_FENCE_COMPLETION_STATUS_ERROR, userdata);
return;
}
@@ -394,14 +395,15 @@ namespace dawn_wire { namespace client {
Fence* fence = reinterpret_cast<Fence*>(cFence);
Queue* queue = reinterpret_cast<Queue*>(cQueue);
if (fence->queue != queue) {
- fence->device->HandleError(
- DAWN_ERROR_TYPE_VALIDATION,
- "Fence must be signaled on the queue on which it was created.");
+ ClientDeviceInjectError(reinterpret_cast<DawnDevice>(fence->device),
+ DAWN_ERROR_TYPE_VALIDATION,
+ "Fence must be signaled on the queue on which it was created.");
return;
}
if (signalValue <= fence->signaledValue) {
- fence->device->HandleError(DAWN_ERROR_TYPE_VALIDATION,
- "Fence value less than or equal to signaled value");
+ ClientDeviceInjectError(reinterpret_cast<DawnDevice>(fence->device),
+ DAWN_ERROR_TYPE_VALIDATION,
+ "Fence value less than or equal to signaled value");
return;
}
fence->signaledValue = signalValue;
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.h b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
index f7b06871f21..c1af4276339 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
@@ -33,7 +33,7 @@ namespace dawn_wire { namespace client {
Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService);
~Client();
- const char* HandleCommands(const char* commands, size_t size);
+ const volatile char* HandleCommands(const volatile char* commands, size_t size);
ReservedTexture ReserveTexture(DawnDevice device);
void* GetCmdSpace(size_t size) {
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
index f7b6ea1e683..09758ea9f55 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
@@ -16,6 +16,8 @@
#include "dawn_wire/client/Client.h"
#include "dawn_wire/client/Device.h"
+#include <limits>
+
namespace dawn_wire { namespace client {
bool Client::DoDeviceUncapturedErrorCallback(DawnErrorType errorType, const char* message) {
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp
index 92542d25ba6..bfd66344609 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp
@@ -16,6 +16,8 @@
#include "dawn_wire/WireClient.h"
#include "dawn_wire/client/Client.h"
+#include <cstring>
+
namespace dawn_wire { namespace client {
class InlineMemoryTransferService : public MemoryTransferService {
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.h b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
index efbb46730ab..0f901ad2fc0 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
@@ -36,7 +36,7 @@ namespace dawn_wire { namespace server {
Server* server;
// TODO(enga): ObjectHandle device;
// when the wire supports multiple devices.
- uint32_t requestSerial;
+ uint64_t requestSerial;
};
struct FenceCompletionUserdata {
@@ -53,7 +53,7 @@ namespace dawn_wire { namespace server {
MemoryTransferService* memoryTransferService);
~Server();
- const char* HandleCommands(const char* commands, size_t size);
+ const volatile char* HandleCommands(const volatile char* commands, size_t size);
bool InjectTexture(DawnTexture texture, uint32_t id, uint32_t generation);
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
index 4e86d3e5715..8713b57c27e 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
@@ -36,7 +36,11 @@ namespace dawn_wire { namespace server {
userdata->server = this;
userdata->requestSerial = requestSerial;
- return mProcs.devicePopErrorScope(cDevice, ForwardPopErrorScope, userdata);
+ bool success = mProcs.devicePopErrorScope(cDevice, ForwardPopErrorScope, userdata);
+ if (!success) {
+ delete userdata;
+ }
+ return success;
}
// static
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerInlineMemoryTransferService.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerInlineMemoryTransferService.cpp
index b512e6f8fe9..105dee49e18 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerInlineMemoryTransferService.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerInlineMemoryTransferService.cpp
@@ -16,6 +16,8 @@
#include "dawn_wire/WireServer.h"
#include "dawn_wire/server/Server.h"
+#include <cstring>
+
namespace dawn_wire { namespace server {
class InlineMemoryTransferService : public MemoryTransferService {
diff --git a/chromium/third_party/dawn/src/fuzzers/BUILD.gn b/chromium/third_party/dawn/src/fuzzers/BUILD.gn
index 2369f94d1cf..57bdcf3a4a9 100644
--- a/chromium/third_party/dawn/src/fuzzers/BUILD.gn
+++ b/chromium/third_party/dawn/src/fuzzers/BUILD.gn
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import("../../scripts/dawn_overrides_with_defaults.gni")
import("//build_overrides/build.gni")
+import("../../scripts/dawn_overrides_with_defaults.gni")
# We only have libfuzzer in Chromium builds but if we build fuzzer targets only
# there, we would risk breaking fuzzer targets all the time when making changes
@@ -127,7 +127,8 @@ dawn_fuzzer_test("dawn_wire_server_and_frontend_fuzzer") {
"${dawn_root}/:libdawn_native_static",
"${dawn_root}/:libdawn_wire_static",
"${dawn_root}/src/common",
- "${dawn_root}/src/dawn:libdawn_static",
+ "${dawn_root}/src/dawn:dawncpp",
+ "${dawn_root}/src/dawn:libdawn_proc",
]
additional_configs = [ "${dawn_root}/src/common:dawn_internal" ]
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_proc.h b/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
new file mode 100644
index 00000000000..ad0e393b248
--- /dev/null
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
@@ -0,0 +1,36 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_DAWN_PROC_H_
+#define DAWN_DAWN_PROC_H_
+
+#include "dawn/dawn.h"
+#include "dawn/dawn_proc_table.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Sets the static proctable used by libdawn_proc to implement the Dawn entrypoints. Passing NULL
+// for `procs` sets up the null proctable that contains only null function pointers. It is the
+// default value of the proctable. Setting the proctable back to null is good practice when you
+// are done using libdawn_proc since further usage will cause a segfault instead of calling an
+// unexpected function.
+DAWN_EXPORT void dawnProcSetProcs(const DawnProcTable* procs);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // DAWN_DAWN_PROC_H_
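A minimal sketch of how this proctable is typically wired up at startup (assuming dawn_native::GetProcs() from DawnNative.h returns the backend-implemented table; InitDawnProcs and ShutdownDawnProcs are hypothetical helper names, not part of the diff above):

    #include "dawn/dawn_proc.h"
    #include "dawn_native/DawnNative.h"

    void InitDawnProcs() {
        // dawn_proc.h documents that the table is held statically by libdawn_proc,
        // so a stack copy would do; a function-local static is used here out of caution.
        static DawnProcTable procs = dawn_native::GetProcs();
        dawnProcSetProcs(&procs);  // dawn* C entrypoints now dispatch into dawn_native
    }

    void ShutdownDawnProcs() {
        // Reset to the null proctable so any stale call fails fast instead of
        // dispatching through a dangling implementation.
        dawnProcSetProcs(nullptr);
    }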
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
index 832af4b9297..ff83f722bf9 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
@@ -69,6 +69,7 @@ typedef struct {
typedef struct {
id<MTLDevice> device = nil;
+ id<MTLCommandQueue> queue = nil;
} DawnWSIContextMetal;
#endif
diff --git a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
index 423b46ff183..b3125ed8f4e 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
@@ -16,6 +16,7 @@
#define DAWNNATIVE_DAWNNATIVE_H_
#include <dawn/dawn.h>
+#include <dawn/dawn_proc_table.h>
#include <dawn_native/dawn_native_export.h>
#include <string>
@@ -159,6 +160,9 @@ namespace dawn_native {
// Backdoor to get the number of lazy clears for testing
DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(DawnDevice device);
+
+ // Backdoor to get the order of the ProcMap for testing
+ DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
} // namespace dawn_native
#endif // DAWNNATIVE_DAWNNATIVE_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
index b5ee54d15cb..7d60c31a5c2 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
@@ -32,7 +32,7 @@ namespace dawn_wire {
class DAWN_WIRE_EXPORT CommandHandler {
public:
virtual ~CommandHandler() = default;
- virtual const char* HandleCommands(const char* commands, size_t size) = 0;
+ virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
};
} // namespace dawn_wire
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
index 458a5930c6e..42b1aa6c6d6 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
@@ -15,10 +15,12 @@
#ifndef DAWNWIRE_WIRECLIENT_H_
#define DAWNWIRE_WIRECLIENT_H_
-#include <memory>
-
+#include "dawn/dawn_proc_table.h"
#include "dawn_wire/Wire.h"
+#include <memory>
+#include <vector>
+
namespace dawn_wire {
namespace client {
@@ -44,7 +46,8 @@ namespace dawn_wire {
DawnDevice GetDevice() const;
DawnProcTable GetProcs() const;
- const char* HandleCommands(const char* commands, size_t size) override final;
+ const volatile char* HandleCommands(const volatile char* commands,
+ size_t size) override final;
ReservedTexture ReserveTexture(DawnDevice device);
@@ -118,8 +121,10 @@ namespace dawn_wire {
virtual ~WriteHandle();
};
};
- } // namespace client
+ // Backdoor to get the order of the ProcMap for testing
+ DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
+ } // namespace client
} // namespace dawn_wire
#endif // DAWNWIRE_WIRECLIENT_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
index f5ae1dcf401..e018b5bae47 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
@@ -19,6 +19,8 @@
#include "dawn_wire/Wire.h"
+struct DawnProcTable;
+
namespace dawn_wire {
namespace server {
@@ -38,7 +40,8 @@ namespace dawn_wire {
WireServer(const WireServerDescriptor& descriptor);
~WireServer();
- const char* HandleCommands(const char* commands, size_t size) override final;
+ const volatile char* HandleCommands(const volatile char* commands,
+ size_t size) override final;
bool InjectTexture(DawnTexture texture, uint32_t id, uint32_t generation);
diff --git a/chromium/third_party/dawn/src/utils/BackendBinding.h b/chromium/third_party/dawn/src/utils/BackendBinding.h
index b3529a2117e..26b8a827121 100644
--- a/chromium/third_party/dawn/src/utils/BackendBinding.h
+++ b/chromium/third_party/dawn/src/utils/BackendBinding.h
@@ -15,7 +15,7 @@
#ifndef UTILS_BACKENDBINDING_H_
#define UTILS_BACKENDBINDING_H_
-#include "dawn/dawncpp.h"
+#include "dawn/dawn.h"
#include "dawn_native/DawnNative.h"
struct GLFWwindow;
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
index 2da1f9fe76d..66be4dee0a4 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
+++ b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
@@ -78,7 +78,7 @@ namespace utils {
// Set defaults for the color state descriptors.
{
descriptor->colorStateCount = 1;
- descriptor->colorStates = &cColorStates[0];
+ descriptor->colorStates = cColorStates.data();
dawn::BlendDescriptor blend;
blend.operation = dawn::BlendOperation::Add;
@@ -90,8 +90,7 @@ namespace utils {
colorStateDescriptor.colorBlend = blend;
colorStateDescriptor.writeMask = dawn::ColorWriteMask::All;
for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- mColorStates[i] = colorStateDescriptor;
- cColorStates[i] = &mColorStates[i];
+ cColorStates[i] = colorStateDescriptor;
}
}
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
index 31f34a52e73..2be2f0ac92d 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
+++ b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
@@ -35,15 +35,17 @@ namespace utils {
public:
ComboRenderPipelineDescriptor(const dawn::Device& device);
- dawn::PipelineStageDescriptor cFragmentStage;
+ ComboRenderPipelineDescriptor(const ComboRenderPipelineDescriptor&) = delete;
+ ComboRenderPipelineDescriptor& operator=(const ComboRenderPipelineDescriptor&) = delete;
+ ComboRenderPipelineDescriptor(ComboRenderPipelineDescriptor&&) = delete;
+ ComboRenderPipelineDescriptor& operator=(ComboRenderPipelineDescriptor&&) = delete;
+
+ dawn::ProgrammableStageDescriptor cFragmentStage;
ComboVertexInputDescriptor cVertexInput;
dawn::RasterizationStateDescriptor cRasterizationState;
- std::array<dawn::ColorStateDescriptor*, kMaxColorAttachments> cColorStates;
+ std::array<dawn::ColorStateDescriptor, kMaxColorAttachments> cColorStates;
dawn::DepthStencilStateDescriptor cDepthStencilState;
-
- private:
- dawn::ColorStateDescriptor mColorStates[kMaxColorAttachments];
};
} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/DawnHelpers.cpp b/chromium/third_party/dawn/src/utils/DawnHelpers.cpp
index 25a8d733169..1ad9adf09e7 100644
--- a/chromium/third_party/dawn/src/utils/DawnHelpers.cpp
+++ b/chromium/third_party/dawn/src/utils/DawnHelpers.cpp
@@ -128,13 +128,11 @@ namespace utils {
ComboRenderPassDescriptor::ComboRenderPassDescriptor(
std::initializer_list<dawn::TextureView> colorAttachmentInfo,
- dawn::TextureView depthStencil)
- : cColorAttachmentsInfoPtr() {
+ dawn::TextureView depthStencil) {
for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- mColorAttachmentsInfo[i].loadOp = dawn::LoadOp::Clear;
- mColorAttachmentsInfo[i].storeOp = dawn::StoreOp::Store;
- mColorAttachmentsInfo[i].clearColor = {0.0f, 0.0f, 0.0f, 0.0f};
- cColorAttachmentsInfoPtr[i] = nullptr;
+ cColorAttachments[i].loadOp = dawn::LoadOp::Clear;
+ cColorAttachments[i].storeOp = dawn::StoreOp::Store;
+ cColorAttachments[i].clearColor = {0.0f, 0.0f, 0.0f, 0.0f};
}
cDepthStencilAttachmentInfo.clearDepth = 1.0f;
@@ -148,13 +146,11 @@ namespace utils {
uint32_t colorAttachmentIndex = 0;
for (const dawn::TextureView& colorAttachment : colorAttachmentInfo) {
if (colorAttachment.Get() != nullptr) {
- mColorAttachmentsInfo[colorAttachmentIndex].attachment = colorAttachment;
- cColorAttachmentsInfoPtr[colorAttachmentIndex] =
- &mColorAttachmentsInfo[colorAttachmentIndex];
+ cColorAttachments[colorAttachmentIndex].attachment = colorAttachment;
}
++colorAttachmentIndex;
}
- colorAttachments = cColorAttachmentsInfoPtr;
+ colorAttachments = cColorAttachments.data();
if (depthStencil.Get() != nullptr) {
cDepthStencilAttachmentInfo.attachment = depthStencil;
@@ -167,19 +163,10 @@ namespace utils {
const ComboRenderPassDescriptor& ComboRenderPassDescriptor::operator=(
const ComboRenderPassDescriptor& otherRenderPass) {
cDepthStencilAttachmentInfo = otherRenderPass.cDepthStencilAttachmentInfo;
- mColorAttachmentsInfo = otherRenderPass.mColorAttachmentsInfo;
-
+ cColorAttachments = otherRenderPass.cColorAttachments;
colorAttachmentCount = otherRenderPass.colorAttachmentCount;
- // Assign the pointers in colorAttachmentsInfoPtr to items in this->mColorAttachmentsInfo
- for (uint32_t i = 0; i < colorAttachmentCount; ++i) {
- if (otherRenderPass.cColorAttachmentsInfoPtr[i] != nullptr) {
- cColorAttachmentsInfoPtr[i] = &mColorAttachmentsInfo[i];
- } else {
- cColorAttachmentsInfoPtr[i] = nullptr;
- }
- }
- colorAttachments = cColorAttachmentsInfoPtr;
+ colorAttachments = cColorAttachments.data();
if (otherRenderPass.depthStencilAttachment != nullptr) {
// Assign desc.depthStencilAttachment to this->depthStencilAttachmentInfo;
diff --git a/chromium/third_party/dawn/src/utils/DawnHelpers.h b/chromium/third_party/dawn/src/utils/DawnHelpers.h
index 4011bc41de5..337ae8508c5 100644
--- a/chromium/third_party/dawn/src/utils/DawnHelpers.h
+++ b/chromium/third_party/dawn/src/utils/DawnHelpers.h
@@ -61,12 +61,9 @@ namespace utils {
const ComboRenderPassDescriptor& operator=(
const ComboRenderPassDescriptor& otherRenderPass);
- dawn::RenderPassColorAttachmentDescriptor* cColorAttachmentsInfoPtr[kMaxColorAttachments];
- dawn::RenderPassDepthStencilAttachmentDescriptor cDepthStencilAttachmentInfo;
-
- private:
std::array<dawn::RenderPassColorAttachmentDescriptor, kMaxColorAttachments>
- mColorAttachmentsInfo;
+ cColorAttachments;
+ dawn::RenderPassDepthStencilAttachmentDescriptor cDepthStencilAttachmentInfo;
};
struct BasicRenderPass {
diff --git a/chromium/third_party/dawn/src/utils/Glfw3Fuchsia.cpp b/chromium/third_party/dawn/src/utils/Glfw3Fuchsia.cpp
new file mode 100644
index 00000000000..cc8ed3ba03c
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/Glfw3Fuchsia.cpp
@@ -0,0 +1,100 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A mock GLFW implementation that supports Fuchsia, but only implements
+// the functions called from Dawn.
+
+// NOTE: This must be included before GLFW/glfw3.h because the latter will
+// include <vulkan/vulkan.h> and "common/vulkan_platform.h" wants to be
+// the first header to do so for sanity reasons (e.g. undefining weird
+// macros on Windows and Linux).
+// clang-format off
+#include "common/vulkan_platform.h"
+#include "common/Assert.h"
+#include <GLFW/glfw3.h>
+// clang-format on
+
+#include <dlfcn.h>
+
+int glfwInit(void) {
+ return GLFW_TRUE;
+}
+
+void glfwDefaultWindowHints(void) {
+}
+
+void glfwWindowHint(int hint, int value) {
+ DAWN_UNUSED(hint);
+ DAWN_UNUSED(value);
+}
+
+struct GLFWwindow {
+ PFN_vkGetInstanceProcAddr GetInstanceProcAddress = nullptr;
+ void* vulkan_loader = nullptr;
+
+ GLFWwindow() {
+ vulkan_loader = ::dlopen("libvulkan.so", RTLD_NOW);
+ ASSERT(vulkan_loader != nullptr);
+ GetInstanceProcAddress = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
+ dlsym(vulkan_loader, "vkGetInstanceProcAddr"));
+ ASSERT(GetInstanceProcAddress != nullptr);
+ }
+
+ ~GLFWwindow() {
+ if (vulkan_loader) {
+ ::dlclose(vulkan_loader);
+ }
+ vulkan_loader = nullptr;
+ }
+};
+
+GLFWwindow* glfwCreateWindow(int width,
+ int height,
+ const char* title,
+ GLFWmonitor* monitor,
+ GLFWwindow* share) {
+ ASSERT(monitor == nullptr);
+ ASSERT(share == nullptr);
+ DAWN_UNUSED(width);
+ DAWN_UNUSED(height);
+ DAWN_UNUSED(title);
+ return new GLFWwindow();
+}
+
+VkResult glfwCreateWindowSurface(VkInstance instance,
+ GLFWwindow* window,
+ const VkAllocationCallbacks* allocator,
+ VkSurfaceKHR* surface) {
+ // IMPORTANT: This assumes that the VkInstance was created with a Fuchsia
+ // swapchain layer enabled, as well as the corresponding extension that
+ // is queried here to perform the surface creation. Dawn should do all
+ // required steps in VulkanInfo.cpp, VulkanFunctions.cpp and BackendVk.cpp.
+
+ auto vkCreateImagePipeSurfaceFUCHSIA = reinterpret_cast<PFN_vkCreateImagePipeSurfaceFUCHSIA>(
+ window->GetInstanceProcAddress(instance, "vkCreateImagePipeSurfaceFUCHSIA"));
+ ASSERT(vkCreateImagePipeSurfaceFUCHSIA != nullptr);
+ if (!vkCreateImagePipeSurfaceFUCHSIA) {
+ *surface = VK_NULL_HANDLE;
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ const struct VkImagePipeSurfaceCreateInfoFUCHSIA create_info = {
+ VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA,
+ nullptr, // pNext
+ 0, // flags, ignored for now
+ ZX_HANDLE_INVALID, // imagePipeHandle, a null handle matches the framebuffer.
+ };
+
+ return vkCreateImagePipeSurfaceFUCHSIA(instance, &create_info, nullptr, surface);
+}
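A caller-side usage sketch for this mock, mirroring the include ordering required above (it assumes Dawn has already created the VkInstance with the image-pipe surface extension enabled, as the comment in glfwCreateWindowSurface notes; CreateTestSurface is a hypothetical helper):

    // clang-format off
    #include "common/vulkan_platform.h"  // must precede GLFW/glfw3.h, see the note above
    #include <GLFW/glfw3.h>
    // clang-format on

    VkSurfaceKHR CreateTestSurface(VkInstance instance) {
        glfwInit();  // trivially returns GLFW_TRUE in this mock
        GLFWwindow* window = glfwCreateWindow(640, 480, "Dawn window", nullptr, nullptr);

        // The mock passes ZX_HANDLE_INVALID as the image pipe handle, so the
        // resulting surface targets the framebuffer rather than a specific image pipe.
        VkSurfaceKHR surface = VK_NULL_HANDLE;
        if (glfwCreateWindowSurface(instance, window, nullptr, &surface) != VK_SUCCESS) {
            return VK_NULL_HANDLE;
        }
        return surface;
    }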
diff --git a/chromium/third_party/dawn/src/utils/MetalBinding.mm b/chromium/third_party/dawn/src/utils/MetalBinding.mm
index 4f9b5bed1af..0dbdb4d7381 100644
--- a/chromium/third_party/dawn/src/utils/MetalBinding.mm
+++ b/chromium/third_party/dawn/src/utils/MetalBinding.mm
@@ -39,7 +39,7 @@ namespace utils {
void Init(DawnWSIContextMetal* ctx) {
mMtlDevice = ctx->device;
- mCommandQueue = [mMtlDevice newCommandQueue];
+ mCommandQueue = ctx->queue;
}
DawnSwapChainError Configure(DawnTextureFormat format,